Kaynağa Gözat

map key to stack

Tobias Waldekranz 8 yıl önce
ebeveyn
işleme
feb3046607
5 değiştirilmiş dosya ile 325 ekleme ve 164 silme
  1. 136 30
      global.c
  2. 113 69
      ir.c
  3. 37 25
      ir.h
  4. 1 2
      kprobe.c
  5. 38 38
      ply.c

+ 136 - 30
global.c

@@ -53,42 +53,23 @@ static int global_dot_ir_post(const struct func *func, struct node *n,
53 53
 		struct node *ptr = sou->expr.args;
54 54
 
55 55
 		ir_emit_sym_to_reg(prog->ir, BPF_REG_3, ptr->sym);
56
-		ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, 0, offset), BPF_REG_3, 0);
56
+		ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, offset), BPF_REG_3, 0);
57 57
 		goto probe_read;
58 58
 	}
59 59
 
60 60
 	offset += sou->sym->irs.stack;
61 61
 
62 62
 	if (dst->loc == LOC_REG) {
63
-		switch (dst->size) {
64
-		case 1:
65
-			ir_emit_insn(prog->ir, LDXB(0, offset, 0),
66
-				     dst->reg, BPF_REG_BP);
67
-			break;
68
-		case 2:
69
-			ir_emit_insn(prog->ir, LDXH(0, offset, 0),
70
-				     dst->reg, BPF_REG_BP);
71
-			break;
72
-		case 4:
73
-			ir_emit_insn(prog->ir, LDXW(0, offset, 0),
74
-				     dst->reg, BPF_REG_BP);
75
-			break;
76
-		case 8:
77
-			ir_emit_insn(prog->ir, LDXDW(0, offset, 0),
78
-				     dst->reg, BPF_REG_BP);
79
-			break;
80
-		default:
81
-			assert(0);
82
-		}
83
-
63
+		ir_emit_insn(prog->ir, LDX(bpf_width(dst->size), offset),
64
+			     dst->reg, BPF_REG_BP);
84 65
 		return 0;
85 66
 	}
86 67
 
87
-	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, 0, offset), BPF_REG_3, 0);
68
+	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, offset), BPF_REG_3, 0);
88 69
 probe_read:
89
-	ir_emit_insn(prog->ir, MOV_IMM(0, (int32_t)dst->size), BPF_REG_2, 0);
90
-	ir_emit_insn(prog->ir, MOV(0, 0), BPF_REG_1, BPF_REG_BP);
91
-	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, 0, dst->stack), BPF_REG_1, 0);
70
+	ir_emit_insn(prog->ir, MOV_IMM((int32_t)dst->size), BPF_REG_2, 0);
71
+	ir_emit_insn(prog->ir, MOV, BPF_REG_1, BPF_REG_BP);
72
+	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, dst->stack), BPF_REG_1, 0);
92 73
 	ir_emit_insn(prog->ir, CALL(BPF_FUNC_probe_read), 0, 0);
93 74
 	/* TODO if (r0) exit(r0); */
94 75
 	return 0;
@@ -154,9 +135,9 @@ static int global_deref_ir_post(const struct func *func, struct node *n,
154 135
                     be overwritten, so skip the load. */
155 136
 		return 0;
156 137
 
157
-	ir_emit_insn(prog->ir, MOV(0, 0), BPF_REG_1, BPF_REG_BP);
158
-	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, 0, dst->stack), BPF_REG_1, 0);
159
-	ir_emit_insn(prog->ir, MOV_IMM(0, (int32_t)dst->size), BPF_REG_2, 0);
138
+	ir_emit_insn(prog->ir, MOV, BPF_REG_1, BPF_REG_BP);
139
+	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, dst->stack), BPF_REG_1, 0);
140
+	ir_emit_insn(prog->ir, MOV_IMM((int32_t)dst->size), BPF_REG_2, 0);
160 141
 	ir_emit_sym_to_reg(prog->ir, BPF_REG_3, ptr->sym);
161 142
 	ir_emit_insn(prog->ir, CALL(BPF_FUNC_probe_read), 0, 0);
162 143
 	/* TODO if (r0) exit(r0); */
@@ -188,6 +169,79 @@ static int global_deref_type_infer(const struct func *func, struct node *n)
188 169
 
189 170
 /* :map */
190 171
 
172
+static int global_map_ir_pre_key(struct node *n, struct prog *prog)
173
+{
174
+	struct node *map = n->expr.args, *arg;
175
+	struct type *ktype = type_base(map->sym->type->map.ktype);
176
+	ssize_t stack = map->sym->irs.stack;
177
+	size_t offset, size, pad;
178
+	struct tfield *f;
179
+
180
+	arg = map->next;
181
+	tfields_foreach(f, ktype->sou.fields) {
182
+		offset = type_offsetof(ktype, f->name);
183
+		size = type_sizeof(f->type);
184
+
185
+		if (!arg->sym->irs.loc) {
186
+			arg->sym->irs.hint.stack = 1;
187
+			arg->sym->irs.stack = stack + offset;
188
+		}
189
+
190
+		if (arg->next) {
191
+			pad = type_offsetof(ktype, f[1].name) - (offset + size);
192
+			if (pad)
193
+				ir_emit_bzero(prog->ir,
194
+					      stack + offset + size, pad);
195
+		}
196
+		arg = arg->next;
197
+	}
198
+
199
+	pad = type_sizeof(ktype) - (offset + size);
200
+	if (pad)
201
+		ir_emit_bzero(prog->ir, stack + offset + size, pad);
202
+	return 0;
203
+}
204
+
205
+static int global_map_ir_pre(const struct func *func, struct node *n,
206
+			     struct prog *prog)
207
+{
208
+	struct irstate *kirs;
209
+	struct node *map = n->expr.args;
210
+	struct type *ktype = type_base(map->sym->type->map.ktype);
211
+
212
+	map->sym->irs.hint.stack = 1;
213
+	ir_init_irs(prog->ir, &map->sym->irs, ktype);
214
+
215
+
216
+	if (ktype->ttype == T_STRUCT)
217
+		return global_map_ir_pre_key(n, prog);
218
+
219
+	kirs = &map->next->sym->irs;
220
+	if (!kirs->loc) {
221
+		kirs->hint.stack = 1;
222
+		kirs->stack = map->sym->irs.stack;
223
+	}
224
+	return 0;
225
+}
226
+
227
+static int global_map_ir_post(const struct func *func, struct node *n,
228
+			      struct prog *prog)
229
+{
230
+	struct node *map = n->expr.args, *arg;
231
+	struct type *ktype = type_base(map->sym->type->map.ktype);
232
+	ssize_t stack = map->sym->irs.stack;
233
+	size_t offset;
234
+	struct tfield *f;
235
+
236
+	arg = map->next;
237
+	tfields_foreach(f, ktype->sou.fields) {
238
+		offset = type_offsetof(ktype, f->name);
239
+		ir_emit_sym_to_stack(prog->ir, stack + offset, arg->sym);
240
+		arg = arg->next;
241
+	}
242
+	return 0;
243
+}
244
+
191 245
 static struct type *global_map_ktype(struct node *n)
192 246
 {
193 247
 	struct node *map, *key;
@@ -370,7 +424,7 @@ static int global_pid_ir_post(const struct func *func, struct node *n,
370 424
 	ir_init_sym(prog->ir, n->sym);
371 425
 
372 426
 	ir_emit_insn(prog->ir, CALL(BPF_FUNC_get_current_pid_tgid), 0, 0);
373
-	ir_emit_insn(prog->ir, ALU64_IMM(BPF_RSH, 0, 32), BPF_REG_0, 0);
427
+	ir_emit_insn(prog->ir, ALU64_IMM(BPF_RSH, 32), BPF_REG_0, 0);
374 428
 	ir_emit_reg_to_sym(prog->ir, n->sym, BPF_REG_0);
375 429
 	return 0;
376 430
 }
@@ -414,6 +468,9 @@ struct type t_time_func = {
414 468
 	.func = { .type = &t_time },
415 469
 };
416 470
 
471
+
472
+/*  */
473
+
417 474
 struct type t_block_func = {
418 475
 	.ttype = T_FUNC,
419 476
 
@@ -515,6 +572,9 @@ static const struct func global_funcs[] = {
515 572
 		/* .type = t_map_func, */
516 573
 		.type_infer = global_map_type_infer,
517 574
 		.static_validate = global_map_static_validate,
575
+
576
+		.ir_pre  = global_map_ir_pre,
577
+		.ir_post = global_map_ir_post,
518 578
 	},
519 579
 
520 580
 	{
@@ -569,8 +629,54 @@ static struct type *global_num_type(struct node *n)
569 629
 	return NULL;
570 630
 }
571 631
 
632
/* Materialize a numeric literal.
 *
 * Values representable as a sign-extended 32-bit immediate are left
 * as LOC_IMM and folded into consuming instructions. Wider values
 * are synthesized into a register with shift/or sequences, since a
 * single BPF immediate is only 32 bits (and is sign-extended, so
 * each chunk OR'd in below is kept to at most 31 bits to avoid the
 * sign bit). */
static int global_num_ir_post(const struct func *func, struct node *n,
			      struct prog *prog)
{
	struct irstate *irs = &n->sym->irs;

	/* Fits in a 32-bit immediate? Then no code is needed here. */
	if ((n->num.unsignd && (n->num.u64 <= INT32_MAX)) ||
	    (n->num.s64 >= INT32_MIN && n->num.s64 <= INT32_MAX)) {
		irs->loc = LOC_IMM;
		irs->imm = n->num.s64;
		irs->size = 4;
		return 0;
	}

	/* we need to build the constant in a register, so ignore any
	 * advice about stack allocation. */
	irs->hint.stack = 0;

	ir_init_sym(prog->ir, n->sym);

	if (n->num.u64 > 0x3fffffffffffffff) {
		/* More than 62 significant bits: build in three
		 * chunks of 31 + 31 + 2 bits, high to low:
		 * (hi31 << 33) | (mid31 << 2) | lo2. */
		ir_emit_insn(prog->ir, MOV64_IMM(n->num.u64 >> 33), irs->reg, 0);
		ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 31), irs->reg, 0);

		if ((n->num.u64 >> 2) & 0x7fffffff)
			ir_emit_insn(prog->ir,
				     ALU64_IMM(BPF_OR, (n->num.u64 >> 2) & 0x7fffffff),
				     irs->reg, 0);
		ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 2), irs->reg, 0);

		if (n->num.u64 & 0x3)
			ir_emit_insn(prog->ir, ALU64_IMM(BPF_OR, n->num.u64 & 0x3),
				     irs->reg, 0);
	} else if (n->num.u64 > 0x7fffffff) {
		/* At most 62 significant bits: two chunks of 31 bits,
		 * (hi31 << 31) | lo31. */
		ir_emit_insn(prog->ir, MOV64_IMM(n->num.u64 >> 31), irs->reg, 0);
		ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 31), irs->reg, 0);

		if (n->num.u64 & 0x7fffffff)
			ir_emit_insn(prog->ir,
				     ALU64_IMM(BPF_OR, n->num.u64 & 0x7fffffff),
				     irs->reg, 0);
	}
	/* NOTE(review): the early return above means every value
	 * reaching this point takes one of the two branches, so the
	 * register is always written — verified against the range
	 * checks, but worth keeping in mind when editing them. */
	return 0;
}
675
+
572 676
 static const struct func global_num_func = {
573 677
 	.name = ":num",
678
+
679
+	.ir_post = global_num_ir_post,
574 680
 };
575 681
 
576 682
 static const struct func global_string_func = {

+ 113 - 69
ir.c

@@ -314,56 +314,26 @@ void ir_emit_label  (struct ir *ir, int16_t label)
314 314
 	ir_emit(ir, &vi);
315 315
 }
316 316
 
317
-/* void ir_emit_reg_get(struct ir *ir, uint16_t reg) */
318
-/* { */
319
-/* 	struct vinsn vi; */
320
-
321
-/* 	vi.vitype = VI_REG_GET; */
322
-/* 	vi.reg = reg; */
323
-/* 	ir_emit(ir, &vi); */
324
-/* } */
325
-
326
-/* void ir_emit_reg_put(struct ir *ir, uint16_t reg) */
327
-/* { */
328
-/* 	struct vinsn vi; */
329
-
330
-/* 	vi.vitype = VI_REG_PUT; */
331
-/* 	vi.reg = reg; */
332
-/* 	ir_emit(ir, &vi); */
333
-/* } */
334
-
335 317
 void ir_emit_sym_to_reg(struct ir *ir, uint16_t dst, struct sym *src)
336 318
 {
337 319
 	struct irstate *irs = &src->irs;
338 320
 
339 321
 	switch (irs->loc) {
340 322
 	case LOC_IMM:
341
-		ir_emit_insn(ir, MOV_IMM(0, irs->imm), dst, 0);
323
+		ir_emit_insn(ir, MOV_IMM(irs->imm), dst, 0);
342 324
 		break;
343 325
 	case LOC_REG:
344
-		ir_emit_insn(ir, MOV(0, 0), dst, irs->reg);
326
+		if (dst == irs->reg)
327
+			break;
328
+
329
+		if (irs->size == 8)
330
+			ir_emit_insn(ir, MOV64, dst, irs->reg);
331
+		else
332
+			ir_emit_insn(ir, MOV, dst, irs->reg);
345 333
 		break;
346 334
 	case LOC_STACK:
347
-		switch (irs->size) {
348
-		case 1:
349
-			ir_emit_insn(ir, LDXB(0, irs->stack, 0),
350
-				     dst, BPF_REG_BP);
351
-			break;
352
-		case 2:
353
-			ir_emit_insn(ir, LDXH(0, irs->stack, 0),
354
-				     dst, BPF_REG_BP);
355
-			break;
356
-		case 4:
357
-			ir_emit_insn(ir, LDXW(0, irs->stack, 0),
358
-				     dst, BPF_REG_BP);
359
-			break;
360
-		case 8:
361
-			ir_emit_insn(ir, LDXDW(0, irs->stack, 0),
362
-				     dst, BPF_REG_BP);
363
-			break;
364
-		default:
365
-			assert(0);
366
-		}
335
+		ir_emit_insn(ir, LDX(bpf_width(irs->size), irs->stack),
336
+			     dst, BPF_REG_BP);
367 337
 		break;
368 338
 	default:
369 339
 		assert(0);
@@ -376,36 +346,97 @@ void ir_emit_reg_to_sym(struct ir *ir, struct sym *dst, uint16_t src)
376 346
 
377 347
 	switch (irs->loc) {
378 348
 	case LOC_REG:
379
-		ir_emit_insn(ir, MOV(0, 0), irs->reg, src);
349
+		if (irs->reg == src)
350
+			break;
351
+
352
+		if (irs->size == 8)
353
+			ir_emit_insn(ir, MOV64, irs->reg, src);
354
+		else
355
+			ir_emit_insn(ir, MOV, irs->reg, src);
380 356
 		break;
381 357
 	case LOC_STACK:
382
-		switch (irs->size) {
383
-		case 1:
384
-			ir_emit_insn(ir, STXB(0, irs->stack, 0),
385
-				     BPF_REG_BP, src);
386
-			break;
387
-		case 2:
388
-			ir_emit_insn(ir, STXH(0, irs->stack, 0),
389
-				     BPF_REG_BP, src);
390
-			break;
391
-		case 4:
392
-			ir_emit_insn(ir, STXW(0, irs->stack, 0),
393
-				     BPF_REG_BP, src);
394
-			break;
395
-		case 8:
396
-			ir_emit_insn(ir, STXDW(0, irs->stack, 0),
397
-				     BPF_REG_BP, src);
398
-			break;
399
-		default:
400
-			assert(0);
401
-		}
358
+		ir_emit_insn(ir, STX(bpf_width(irs->size), irs->stack),
359
+			     BPF_REG_BP, src);
402 360
 		break;
403 361
 	default:
404 362
 		assert(0);
405 363
 	}
406 364
 }
365
+#include "ply.h"
366
+void ir_emit_sym_to_stack(struct ir *ir, ssize_t offset, struct sym *src)
367
+{
368
+	struct irstate *irs = &src->irs;
407 369
 
408
-/* void ir_emit_xfer(struct ir *ir,  */
370
+	switch (irs->loc) {
371
+	case LOC_IMM:
372
+		_d("WKZ %zu @ %zd (%d)\n", irs->size, offset, irs->imm);
373
+		ir_emit_insn(ir, ST_IMM(bpf_width(irs->size), offset, irs->imm),
374
+			     BPF_REG_BP, 0);
375
+		break;
376
+	case LOC_REG:
377
+		ir_emit_insn(ir, STX(bpf_width(irs->size), offset),
378
+			     BPF_REG_BP, irs->reg);
379
+	case LOC_STACK:
380
+		ir_emit_memcpy(ir, offset, irs->stack, irs->size);
381
+		break;
382
+	default:
383
+		assert(0);
384
+	}
385
+}
386
+
387
+void ir_emit_memcpy(struct ir *ir, ssize_t dst, ssize_t src, size_t size)
388
+{
389
+	if (dst == src)
390
+		return;
391
+
392
+	for (; size >= 8; size -= 8, dst += 8, src += 8) {
393
+		ir_emit_insn(ir, LDX(BPF_DW, src), BPF_REG_0, BPF_REG_BP);
394
+		ir_emit_insn(ir, STX(BPF_DW, dst), BPF_REG_BP, BPF_REG_0);
395
+	}
396
+
397
+	if (size >= 4) {
398
+		ir_emit_insn(ir, LDX(BPF_W, src), BPF_REG_0, BPF_REG_BP);
399
+		ir_emit_insn(ir, STX(BPF_W, dst), BPF_REG_BP, BPF_REG_0);
400
+		size -= 4, dst += 4, src += 4;
401
+	}
402
+
403
+	if (size >= 2) {
404
+		ir_emit_insn(ir, LDX(BPF_H, src), BPF_REG_0, BPF_REG_BP);
405
+		ir_emit_insn(ir, STX(BPF_H, dst), BPF_REG_BP, BPF_REG_0);
406
+		size -= 2, dst += 2, src += 2;
407
+	}
408
+
409
+	if (size >= 1) {
410
+		ir_emit_insn(ir, LDX(BPF_B, src), BPF_REG_0, BPF_REG_BP);
411
+		ir_emit_insn(ir, STX(BPF_B, dst), BPF_REG_BP, BPF_REG_0);
412
+		size -= 1, dst += 1, src += 1;
413
+	}
414
+
415
+	assert(size == 0);
416
+}
417
+
418
+void ir_emit_bzero(struct ir *ir, ssize_t offset, size_t size)
419
+{
420
+	for (; size >= 8; size -= 8)
421
+		ir_emit_insn(ir, ST_IMM(BPF_DW, offset, 0), BPF_REG_BP, 0);
422
+
423
+	if (size >= 4) {
424
+		ir_emit_insn(ir, ST_IMM(BPF_W, offset, 0), BPF_REG_BP, 0);
425
+		size -= 4;
426
+	}
427
+
428
+	if (size >= 2) {
429
+		ir_emit_insn(ir, ST_IMM(BPF_H, offset, 0), BPF_REG_BP, 0);
430
+		size -= 2;
431
+	}
432
+
433
+	if (size >= 1) {
434
+		ir_emit_insn(ir, ST_IMM(BPF_B, offset, 0), BPF_REG_BP, 0);
435
+		size -= 1;
436
+	}
437
+
438
+	assert(size == 0);
439
+}
409 440
 
410 441
 int16_t ir_alloc_label (struct ir *ir)
411 442
 {
@@ -417,26 +448,34 @@ uint16_t ir_alloc_register(struct ir *ir)
417 448
 	return ir->next_reg++;
418 449
 }
419 450
 
420
-ssize_t ir_alloc_stack(struct ir *ir, size_t size)
451
+ssize_t ir_alloc_stack(struct ir *ir, size_t size, size_t align)
421 452
 {
422 453
 	ir->sp -= size;
423
-	assert(ir->sp > INT32_MIN);
454
+
455
+	if (ir->sp % align)
456
+		ir->sp -= align - (ir->sp & align);
457
+
458
+	assert(ir->sp > INT16_MIN);
424 459
 
425 460
 	return ir->sp;
426 461
 }
427 462
 
428
-void ir_init_sym(struct ir *ir, struct sym *sym)
463
+void ir_init_irs(struct ir *ir, struct irstate *irs, struct type *t)
429 464
 {
430
-	struct irstate *irs = &sym->irs;
431
-	struct type *t = type_base(sym->type);
465
+	t = type_base(t);
432 466
 
433 467
 	if (irs->loc)
434 468
 		return;
435 469
 
436 470
 	irs->size = type_sizeof(t);
437 471
 
438
-	if (irs->hint.stack)
472
+	if (irs->hint.stack) {
473
+		if (irs->stack) {
474
+			irs->loc = LOC_STACK;
475
+			return;
476
+		}
439 477
 		goto alloc_stack;
478
+	}
440 479
 
441 480
 	switch (t->ttype) {
442 481
 	case T_SCALAR:
@@ -448,10 +487,15 @@ void ir_init_sym(struct ir *ir, struct sym *sym)
448 487
 	default:
449 488
 	alloc_stack:
450 489
 		irs->loc = LOC_STACK;
451
-		irs->stack = ir_alloc_stack(ir, irs->size);
490
+		irs->stack = ir_alloc_stack(ir, irs->size, type_alignof(t));
452 491
 	}
453 492
 }
454 493
 
494
+void ir_init_sym(struct ir *ir, struct sym *sym)
495
+{
496
+	return ir_init_irs(ir, &sym->irs, sym->type);
497
+}
498
+
455 499
 struct ir *ir_new(void)
456 500
 {
457 501
 	struct ir *ir;

+ 37 - 25
ir.h

@@ -17,46 +17,56 @@
17 17
 		.imm   = _imm			\
18 18
 	})
19 19
 
20
-#define MOV(_dst, _src)     INSN(BPF_ALU64 | BPF_MOV | BPF_X, _dst, _src, 0, 0)
21
-#define MOV_IMM(_dst, _imm) INSN(BPF_ALU64 | BPF_MOV | BPF_K, _dst, 0, 0, _imm)
20
+#define MOV32     INSN(BPF_ALU | BPF_MOV | BPF_X, 0, 0, 0, 0)
21
+#define MOV32_IMM(_imm) INSN(BPF_ALU | BPF_MOV | BPF_K, 0, 0, 0, _imm)
22
+
23
+#define MOV64     INSN(BPF_ALU64 | BPF_MOV | BPF_X, 0, 0, 0, 0)
24
+#define MOV64_IMM(_imm) INSN(BPF_ALU64 | BPF_MOV | BPF_K, 0, 0, 0, _imm)
22 25
 
23 26
 #define EXIT INSN(BPF_JMP | BPF_EXIT, 0, 0, 0, 0)
24 27
 #define CALL(_imm) INSN(BPF_JMP | BPF_CALL, 0, 0, 0, _imm)
25
-#define JMP(_op, _dst, _src, _off)     INSN(BPF_JMP | BPF_OP((_op)) | BPF_X, _dst, _src, _off, 0)
26
-#define JMP_IMM(_op, _dst, _imm, _off) INSN(BPF_JMP | BPF_OP((_op)) | BPF_K, _dst, 0, _off, _imm)
27 28
 
28
-#define ALU32(_op, _dst, _src)     INSN(BPF_ALU | BPF_OP((_op)) | BPF_X, _dst, _src, 0, 0)
29
-#define ALU32_IMM(_op, _dst, _imm) INSN(BPF_ALU | BPF_OP((_op)) | BPF_K, _dst, 0, 0, _imm)
29
+#define JMP(_op, _off)     INSN(BPF_JMP | BPF_OP((_op)) | BPF_X, 0, 0, _off, 0)
30
+#define JMP_IMM(_op, _imm, _off) INSN(BPF_JMP | BPF_OP((_op)) | BPF_K, 0, 0, _off, _imm)
30 31
 
31
-#define ALU64(_op, _dst, _src)     INSN(BPF_ALU64 | BPF_OP((_op)) | BPF_X, _dst, _src, 0, 0)
32
-#define ALU64_IMM(_op, _dst, _imm) INSN(BPF_ALU64 | BPF_OP((_op)) | BPF_K, _dst, 0, 0, _imm)
32
+#define ALU32(_op)     INSN(BPF_ALU | BPF_OP((_op)) | BPF_X, 0, 0, 0, 0)
33
+#define ALU32_IMM(_op, _imm) INSN(BPF_ALU | BPF_OP((_op)) | BPF_K, 0, 0, 0, _imm)
33 34
 
34
-#define STW_IMM(_dst, _off, _imm) INSN(BPF_ST  | BPF_SIZE(BPF_W)  | BPF_MEM, _dst, 0, _off, _imm)
35
-#define STXB(_dst, _off, _src)   INSN(BPF_STX | BPF_SIZE(BPF_B) | BPF_MEM, _dst, _src, _off, 0)
36
-#define STXH(_dst, _off, _src)   INSN(BPF_STX | BPF_SIZE(BPF_H) | BPF_MEM, _dst, _src, _off, 0)
37
-#define STXW(_dst, _off, _src)   INSN(BPF_STX | BPF_SIZE(BPF_W) | BPF_MEM, _dst, _src, _off, 0)
38
-#define STXDW(_dst, _off, _src)   INSN(BPF_STX | BPF_SIZE(BPF_DW) | BPF_MEM, _dst, _src, _off, 0)
35
+#define ALU64(_op)     INSN(BPF_ALU64 | BPF_OP((_op)) | BPF_X, 0, 0, 0, 0)
36
+#define ALU64_IMM(_op, _imm) INSN(BPF_ALU64 | BPF_OP((_op)) | BPF_K, 0, 0, 0, _imm)
39 37
 
40
-#define LDXB(_dst, _off, _src)  INSN(BPF_LDX | BPF_SIZE(BPF_B)  | BPF_MEM, _dst, _src, _off, 0)
41
-#define LDXH(_dst, _off, _src)  INSN(BPF_LDX | BPF_SIZE(BPF_H)  | BPF_MEM, _dst, _src, _off, 0)
42
-#define LDXW(_dst, _off, _src)  INSN(BPF_LDX | BPF_SIZE(BPF_W)  | BPF_MEM, _dst, _src, _off, 0)
43
-#define LDXDW(_dst, _off, _src) INSN(BPF_LDX | BPF_SIZE(BPF_DW) | BPF_MEM, _dst, _src, _off, 0)
38
+#define ST_IMM(_width, _off, _imm) INSN(BPF_ST | BPF_SIZE(_width) | BPF_MEM, 0, 0, _off, _imm)
39
+#define STX(_width, _off) INSN(BPF_STX | BPF_SIZE(_width) | BPF_MEM, 0, 0, _off, 0)
40
+#define LDX(_width, _off) INSN(BPF_LDX | BPF_SIZE(_width) | BPF_MEM, 0, 0, _off, 0)
44 41
 
45 42
 #if __WORDSIZE == 64
43
+#  define MOV MOV64
44
+#  define MOV_IMM MOV64_IMM
46 45
 #  define ALU ALU64
47 46
 #  define ALU_IMM ALU64_IMM
48
-#  define STX STXDW
49
-#  define LDX LDXDW
50 47
 #else
48
+#  define MOV MOV32
49
+#  define MOV_IMM MOV32_IMM
51 50
 #  define ALU ALU32
52 51
 #  define ALU_IMM ALU32_IMM
53
-#  define STX STXW
54
-#  define LDX LDXW
55 52
 #endif
56 53
 
57 54
 #define BPF_REG_BP BPF_REG_10
58 55
 
/* Map an access size in bytes to the matching BPF width encoding
 * (BPF_B/BPF_H/BPF_W/BPF_DW), or -1 if no width matches. */
static inline int bpf_width(size_t size)
{
	if (size == 1)
		return BPF_B;
	if (size == 2)
		return BPF_H;
	if (size == 4)
		return BPF_W;
	if (size == 8)
		return BPF_DW;

	return -1;
}
67
+
59 68
 struct sym;
69
+struct type;
60 70
 
61 71
 enum vitype {
62 72
 	VI_INSN,
@@ -123,18 +133,20 @@ void vinsn_dump(struct vinsn *vi, FILE *fp);
123 133
 void ir_dump(struct ir *ir, FILE *fp);
124 134
 
125 135
 int16_t  ir_alloc_label   (struct ir *ir);
126
-uint16_t ir_alloc_register(struct ir *ir);
127
-ssize_t  ir_alloc_stack   (struct ir *ir, size_t size);
128 136
 
137
+void ir_init_irs(struct ir *ir, struct irstate *irs, struct type *t);
129 138
 void ir_init_sym(struct ir *ir, struct sym *sym);
130 139
 
131 140
 void ir_emit_insn   (struct ir *ir, struct bpf_insn bpf, uint16_t dst, uint16_t src);
132 141
 void ir_emit_ldmap  (struct ir *ir, uint16_t dst, struct sym *map);
133 142
 void ir_emit_label  (struct ir *ir, int16_t label);
134
-/* void ir_emit_reg_get(struct ir *ir, uint16_t reg); */
135
-/* void ir_emit_reg_put(struct ir *ir, uint16_t reg); */
143
+
136 144
 void ir_emit_sym_to_reg(struct ir *ir, uint16_t dst, struct sym *src);
137 145
 void ir_emit_reg_to_sym(struct ir *ir, struct sym *dst, uint16_t src);
146
+void ir_emit_sym_to_stack(struct ir *ir, ssize_t offset, struct sym *src);
147
+
148
+void ir_emit_memcpy(struct ir *ir, ssize_t dst, ssize_t src, size_t size);
149
+void ir_emit_bzero(struct ir *ir, ssize_t offset, size_t size);
138 150
 
139 151
 struct ir *ir_new(void);
140 152
 

+ 1 - 2
kprobe.c

@@ -94,8 +94,7 @@ static int kprobe_ir_pre(struct prog *prog)
94 94
 			 * pt_regs struct, which ply denotes as
95 95
 			 * 'regs'. if we're using it we need to get a
96 96
 			 * reference to it before it is clobbered. */
97
-			ir_emit_insn(prog->ir, MOV(0, 0),
98
-				     (*sym)->irs.reg, BPF_REG_1);
97
+			ir_emit_insn(prog->ir, MOV, (*sym)->irs.reg, BPF_REG_1);
99 98
 		}
100 99
 	}
101 100
 

+ 38 - 38
ply.c

@@ -85,18 +85,18 @@ struct ctx *ctx_get(void)
85 85
 			  node_expr("=",
86 86
 				    node_expr("{}",
87 87
 					      node_ident("t"),
88
-					      node_num("0"),
89 88
 					      node_expr("pid", NULL),
89
+					      node_num("0x10000000"),
90 90
 					      NULL),
91 91
 				    node_expr("time", NULL),
92 92
 				    NULL),
93
-			  node_expr("=",
94
-				    node_expr("{}",
95
-					      node_ident("reads"),
96
-					      node_expr("pid", NULL),
97
-					      NULL),
98
-				    node_expr("quantize", node_ident("arg2"), NULL),
99
-				    NULL),
93
+			  /* node_expr("=", */
94
+			  /* 	    node_expr("{}", */
95
+			  /* 		      node_ident("reads"), */
96
+			  /* 		      node_expr("pid", NULL), */
97
+			  /* 		      NULL), */
98
+			  /* 	    node_expr("quantize", node_ident("arg2"), NULL), */
99
+			  /* 	    NULL), */
100 100
 			  NULL);
101 101
 
102 102
 	prog->provider = provider_get("k");
@@ -105,36 +105,36 @@ struct ctx *ctx_get(void)
105 105
 	ctx->progs[0] = prog;
106 106
 
107 107
 	/* PROBE1 */
108
-	prog = calloc(1, sizeof(*prog));
109
-	prog->locals = calloc(1, sizeof(*prog->locals));
110
-	prog->globals = ctx->globals;
111
-
112
-	/* TODO: k -> kret */
113
-	prog->probe = "k:SyS_read2"; 
114
-	/* { times[pid()] = quantize(time() - t[0, pid()]) } */
115
-	prog->ast =
116
-		node_expr("=",
117
-			  node_expr("{}",
118
-				    node_ident("times"),
119
-				    node_expr("pid", NULL),
120
-				    NULL),
121
-			  node_expr("quantize",
122
-				    node_expr("-",
123
-					      node_expr("time", NULL),
124
-					      node_expr("{}",
125
-							node_ident("t"),
126
-							node_num("0"),
127
-							node_expr("time", NULL),
128
-							node_expr("pid", NULL),
129
-							NULL),
130
-					      NULL),
131
-				    NULL),
132
-			  NULL);
133
-
134
-	prog->provider = provider_get("k");
135
-	prog->provider->probe(prog);
136
-	prog->ir = ir_new();
137
-	ctx->progs[1] = prog;
108
+	/* prog = calloc(1, sizeof(*prog)); */
109
+	/* prog->locals = calloc(1, sizeof(*prog->locals)); */
110
+	/* prog->globals = ctx->globals; */
111
+
112
+	/* /\* TODO: k -> kret *\/ */
113
+	/* prog->probe = "k:SyS_read2";  */
114
+	/* /\* { times[pid()] = quantize(time() - t[0, pid()]) } *\/ */
115
+	/* prog->ast = */
116
+	/* 	node_expr("=", */
117
+	/* 		  node_expr("{}", */
118
+	/* 			    node_ident("times"), */
119
+	/* 			    node_expr("pid", NULL), */
120
+	/* 			    NULL), */
121
+	/* 		  node_expr("quantize", */
122
+	/* 			    node_expr("-", */
123
+	/* 				      node_expr("time", NULL), */
124
+	/* 				      node_expr("{}", */
125
+	/* 						node_ident("t"), */
126
+	/* 						node_num("0"), */
127
+	/* 						node_expr("time", NULL), */
128
+	/* 						node_expr("pid", NULL), */
129
+	/* 						NULL), */
130
+	/* 				      NULL), */
131
+	/* 			    NULL), */
132
+	/* 		  NULL); */
133
+
134
+	/* prog->provider = provider_get("k"); */
135
+	/* prog->provider->probe(prog); */
136
+	/* prog->ir = ir_new(); */
137
+	/* ctx->progs[1] = prog; */
138 138
 
139 139
 	return ctx;
140 140
 }