@@ -1,4 +1,5 @@
 #include <assert.h>
+#include <errno.h>
 #include <inttypes.h>
 #include <stdio.h>
 #include <string.h>
@@ -507,6 +508,9 @@ struct ir *ir_new(void)
 	return ir;
 }
 
+
+/* ir->bpf generation */
+
 static void ir_bpf_vreg_replace(struct ir *ir, struct vinsn *last, int reg)
 {
 	struct vinsn *vi;
@@ -525,7 +529,7 @@ static void ir_bpf_vreg_replace(struct ir *ir, struct vinsn *last, int reg)
 }
 
 
-int ir_bpf_registerize_one(struct ir *ir, struct vinsn *last)
+static int ir_bpf_registerize_one(struct ir *ir, struct vinsn *last)
 {
 	struct vinsn *vi;
 	uint16_t clean = 0x3ff;
@@ -538,7 +542,8 @@ int ir_bpf_registerize_one(struct ir *ir, struct vinsn *last)
 			clean &= ~(1 << vi->insn.src);
 		if (!(vi->insn.dst & vreg_base))
 			clean &= ~(1 << vi->insn.dst);
-		if (vi->insn.bpf.code == (BPF_JMP | BPF_CALL))
+		if ((BPF_CLASS(vi->insn.bpf.code) == BPF_JMP)
+		    && (BPF_OP(vi->insn.bpf.code) == BPF_CALL))
 			clean &= ~BPF_REG_CALLER_SAVE;
 	}
 
@@ -555,7 +560,7 @@ int ir_bpf_registerize_one(struct ir *ir, struct vinsn *last)
 	return -1;
 }
 
-int ir_bpf_registerize(struct ir *ir)
+static int ir_bpf_registerize(struct ir *ir)
 {
 	struct vinsn *vi;
 	int err = 0;
@@ -576,6 +581,50 @@ int ir_bpf_registerize(struct ir *ir)
 	return err;
 }
 
+static int ir_bpf_jmp_resolve_one(struct ir *ir, struct vinsn *jmp)
+{
+	struct vinsn *vi;
+	int off = 0;
+
+	for (vi = jmp + 1; vi < &ir->vi[ir->len - 1]; vi++) {
+		switch (vi->vitype) {
+		case VI_INSN:
+			off++;
+			break;
+		case VI_LABEL:
+			if (vi->label != jmp->insn.bpf.off)
+				break;
+
+			jmp->insn.bpf.off = off;
+			return 0;
+		default:
+			break;
+		}
+	}
+
+	return -ENOENT;
+}
+
+static int ir_bpf_jmp_resolve(struct ir *ir)
+{
+	struct vinsn *vi;
+	int err;
+
+	for (vi = ir->vi; vi < &ir->vi[ir->len - 1]; vi++) {
+		if (vi->vitype != VI_INSN)
+			continue;
+
+		if ((BPF_CLASS(vi->insn.bpf.code) == BPF_JMP)
+		    && (vi->insn.bpf.off < 0)) {
+			err = ir_bpf_jmp_resolve_one(ir, vi);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
 int ir_bpf_generate(struct ir *ir)
 {
 	int err;
@@ -584,5 +633,13 @@ int ir_bpf_generate(struct ir *ir)
 	if (err)
 		return err;
 
+	/* no instructions will be added/removed to the program after
+	 * this point, thus it is now safe to convert labeled jumps to
+	 * fixed offsets. */
+
+	err = ir_bpf_jmp_resolve(ir);
+	if (err)
+		return err;
+
 	return 0;
 }