A dynamic tracer for Linux

global.c

#define _GNU_SOURCE	/* asprintf */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "arch.h"
#include "func.h"
#include "node.h"
#include "ply.h"
#include "sym.h"
#include "type.h"

/* . */

static int global_dot_ir_pre(const struct func *func, struct node *n,
			     struct prog *prog)
{
	struct node *sou = n->expr.args;

	if (node_is(sou, ":deref")) {
		/* (*ptr).member: if *ptr is not already loaded, let it
		 * know that we're only interested in one member. */
		sou->sym->irs.hint.dot = 1;

		/* this also means we need to put ourselves on the
		 * stack, since data will be loaded via probe_read. */
		n->sym->irs.hint.stack = 1;
	}
	return 0;
}
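
/* The pre-pass only plants hints; global_dot_ir_post() below acts on
 * them. For a (hypothetical) expression like `(*task).comm`, the
 * :deref child sees hint.dot and leaves the whole struct unloaded,
 * while the `.` node reserves stack space for the one member that is
 * actually needed. */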

static int global_dot_ir_post(const struct func *func, struct node *n,
			      struct prog *prog)
{
	struct node *sou, *member;
	struct irstate *dst;
	ssize_t offset;

	sou = n->expr.args;
	member = sou->next;
	dst = &n->sym->irs;

	ir_init_sym(prog->ir, n->sym);

	offset = type_offsetof(type_base(sou->sym->type), member->string.data);
	assert(offset >= 0);

	if (!sou->sym->irs.loc) {
		/* sou is a :deref which wasn't loaded by the child,
		 * just read the member we're interested in. */
		struct node *ptr = sou->expr.args;

		ir_emit_sym_to_reg(prog->ir, BPF_REG_3, ptr->sym);
		ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, offset), BPF_REG_3, 0);
		goto probe_read;
	}

	offset += sou->sym->irs.stack;

	if (dst->loc == LOC_REG) {
		ir_emit_insn(prog->ir, LDX(bpf_width(dst->size), offset),
			     dst->reg, BPF_REG_BP);
		return 0;
	}

	/* the member already sits on our stack frame; point r3 at it */
	ir_emit_insn(prog->ir, MOV, BPF_REG_3, BPF_REG_BP);
	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, offset), BPF_REG_3, 0);
probe_read:
	ir_emit_insn(prog->ir, MOV_IMM((int32_t)dst->size), BPF_REG_2, 0);
	ir_emit_insn(prog->ir, MOV, BPF_REG_1, BPF_REG_BP);
	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, dst->stack), BPF_REG_1, 0);
	ir_emit_insn(prog->ir, CALL(BPF_FUNC_probe_read), 0, 0);
	/* TODO: if (r0) exit(r0); */
	return 0;
}
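
/* The probe_read sequence above follows the BPF calling convention
 * for bpf_probe_read(dst, size, src): destination pointer in r1,
 * size in r2 and the (possibly unsafe) source address in r3. The
 * TODO refers to propagating a non-zero r0 instead of ignoring it. */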

static int global_dot_type_infer(const struct func *func, struct node *n)
{
	struct node *sou, *member;
	struct type *t;
	struct tfield *f;

	if (n->sym->type)
		return 0;

	sou = n->expr.args;
	member = sou->next;
	if (!sou->sym->type)
		return 0;

	t = type_base(sou->sym->type);

	/* TODO: add union */
	if (t->ttype != T_STRUCT) {
		_e("%#N: %N is neither struct nor union (type '%T').\n",
		   n, sou, sou->sym->type);
		return -EINVAL;
	}

	f = tfields_get(t->sou.fields, member->string.data);
	if (!f) {
		_e("%#N: type '%T' has no member named %N.\n", n, t, member);
		return -EINVAL;
	}

	/* given `sou.member` where sou is a struct/union, infer that
	 * the expression's type is equal to member's type. */
	n->sym->type = f->type;
	return 0;
}

/* :deref */

static int global_deref_ir_post(const struct func *func, struct node *n,
				struct prog *prog)
{
	struct node *ptr = n->expr.args;
	struct irstate *dst;

	dst = &n->sym->irs;
	if (dst->hint.dot)
		/* (*ptr).member: ptr points to a struct and our
		 * parent is only interested in one member. don't load
		 * the struct, let the dot operation steal the address
		 * from our argument. */
		return 0;

	ir_init_sym(prog->ir, n->sym);

	if (dst->hint.lval)
		/* *ptr = val: whatever is in our storage now will be
		 * overwritten, so skip the load. */
		return 0;

	ir_emit_sym_to_reg(prog->ir, BPF_REG_3, ptr->sym);
	ir_emit_read_to_sym(prog->ir, n->sym, BPF_REG_3);
	return 0;
}

static int global_deref_type_infer(const struct func *func, struct node *n)
{
	struct node *ptr = n->expr.args;
	struct type *t;

	if (n->sym->type || !ptr->sym->type)
		return 0;

	t = type_base(ptr->sym->type);
	if (t->ttype != T_POINTER) {
		_e("%#N: can't dereference %N (type '%T').\n",
		   n, ptr, ptr->sym->type);
		return -EINVAL;
	}

	/* given `*p` where p is a pointer, infer that the
	 * expression's type is equal to p's concrete type. */
	n->sym->type = t->ptr.type;
	return 0;
}
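
/* Example (hypothetical): if `p` has type `struct foo *`, then `*p`
 * is inferred as `struct foo`. type_base() presumably strips
 * typedefs first, so a typedef'd pointer type behaves the same way
 * here. */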

/* :map */

static int map_ir_update(struct node *n, struct prog *prog)
{
	struct node *map = n->expr.args;

	ir_emit_ldmap(prog->ir, BPF_REG_1, map->sym);
	ir_emit_insn(prog->ir, MOV, BPF_REG_2, BPF_REG_BP);
	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, map->sym->irs.stack), BPF_REG_2, 0);
	ir_emit_insn(prog->ir, MOV, BPF_REG_3, BPF_REG_BP);
	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, n->sym->irs.stack), BPF_REG_3, 0);
	ir_emit_insn(prog->ir, MOV_IMM(0), BPF_REG_4, 0);
	ir_emit_insn(prog->ir, CALL(BPF_FUNC_map_update_elem), 0, 0);
	/* TODO: if (r0) exit(r0); */
	return 0;
}
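
/* Calling convention for bpf_map_update_elem(map, key, value, flags):
 * r1 = map, r2 = key, r3 = value, r4 = flags, where 0 is BPF_ANY,
 * i.e. create the element or overwrite an existing one. */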

static int map_ir_pre_key(struct node *n, struct prog *prog)
{
	struct node *map = n->expr.args, *arg;
	struct type *ktype = type_base(map->sym->type->map.ktype);
	ssize_t stack = map->sym->irs.stack;
	size_t offset, size, pad;
	struct tfield *f;

	arg = map->next;
	tfields_foreach(f, ktype->sou.fields) {
		offset = type_offsetof(ktype, f->name);
		size = type_sizeof(f->type);

		if (!arg->sym->irs.loc) {
			arg->sym->irs.hint.stack = 1;
			arg->sym->irs.stack = stack + offset;
		}

		if (arg->next) {
			pad = type_offsetof(ktype, f[1].name) - (offset + size);
			if (pad)
				ir_emit_bzero(prog->ir,
					      stack + offset + size, pad);
		}

		arg = arg->next;
	}

	pad = type_sizeof(ktype) - (offset + size);
	if (pad)
		ir_emit_bzero(prog->ir, stack + offset + size, pad);
	return 0;
}
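
/* The bzero calls are not cosmetic: the kernel compares map keys as
 * raw bytes, so any padding between key fields must be zeroed, or
 * two logically equal keys could miss each other in the map. */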

static int map_ir_pre(const struct func *func, struct node *n,
		      struct prog *prog)
{
	struct irstate *kirs;
	struct node *map = n->expr.args;
	struct type *ktype = type_base(map->sym->type->map.ktype);

	map->sym->irs.hint.stack = 1;
	ir_init_irs(prog->ir, &map->sym->irs, ktype);

	if (ktype->ttype == T_STRUCT)
		return map_ir_pre_key(n, prog);

	kirs = &map->next->sym->irs;
	if (!kirs->loc) {
		kirs->hint.stack = 1;
		kirs->stack = map->sym->irs.stack;
	}
	return 0;
}

static int map_ir_post(const struct func *func, struct node *n,
		       struct prog *prog)
{
	struct node *map = n->expr.args, *arg;
	struct type *ktype = type_base(map->sym->type->map.ktype);
	ssize_t stack = map->sym->irs.stack;
	size_t offset;
	struct tfield *f;
	int16_t lmiss, lhit;

	arg = map->next;
	if (ktype->ttype == T_STRUCT) {
		tfields_foreach(f, ktype->sou.fields) {
			offset = type_offsetof(ktype, f->name);
			ir_emit_sym_to_stack(prog->ir, stack + offset, arg->sym);
			arg = arg->next;
		}
	} else {
		ir_emit_sym_to_stack(prog->ir, stack, arg->sym);
		assert(!arg->next);
	}

	n->sym->irs.hint.stack = 1;
	ir_init_sym(prog->ir, n->sym);

	if (n->sym->irs.hint.lval)
		/* map[key] = val: whatever is in our storage now will
		 * be overwritten, so skip the load. */
		return 0;

	ir_emit_ldmap(prog->ir, BPF_REG_1, map->sym);
	ir_emit_insn(prog->ir, MOV, BPF_REG_2, BPF_REG_BP);
	/* r10 (BP) is read-only, so the key offset is added to the
	 * copy in r2. */
	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, stack), BPF_REG_2, 0);
	ir_emit_insn(prog->ir, CALL(BPF_FUNC_map_lookup_elem), 0, 0);

	lmiss = ir_alloc_label(prog->ir);
	lhit = ir_alloc_label(prog->ir);

	ir_emit_insn(prog->ir, JMP_IMM(BPF_JEQ, 0, lmiss), BPF_REG_0, 0);
	ir_emit_read_to_sym(prog->ir, n->sym, BPF_REG_0);
	ir_emit_insn(prog->ir, JMP(BPF_JA, lhit), 0, 0);

	ir_emit_label(prog->ir, lmiss);
	ir_emit_bzero(prog->ir, n->sym->irs.stack, n->sym->irs.size);

	ir_emit_label(prog->ir, lhit);
	return 0;
}
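
/* On a lookup miss r0 is NULL, so the value slot is zero-filled
 * instead; a script that reads a never-written key thus sees 0, not
 * stack garbage. */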

static struct type *map_key_type(struct node *n)
{
	struct node *map, *key;
	struct type *ktype;
	struct tfield *kfields, *f;
	int i, nargs = node_nargs(n);

	map = n->expr.args;
	if (nargs == 2)
		return map->next->sym->type;

	ktype = calloc(1, sizeof(*ktype));
	assert(ktype);
	kfields = calloc(nargs, sizeof(*kfields));
	assert(kfields);

	for (key = map->next, f = kfields, i = 0; key;
	     key = key->next, f++, i++) {
		asprintf(&f->name, "k%d", i);
		f->type = key->sym->type;
	}

	asprintf(&ktype->sou.name, ":%s_key", map->ident.name);
	ktype->ttype = T_STRUCT;
	ktype->sou.fields = kfields;
	type_add(ktype);
	return ktype;
}
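
/* Example (hypothetical): a two-key lookup `m[a, b]` synthesizes
 * `struct :m_key { k0; k1; }`, letting the struct-based key layout
 * in map_ir_pre_key() handle the multi-key case for free. */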

static int map_type_validate(struct node *n)
{
	/* TODO */
	return 0;
}

static int map_type_infer(const struct func *func, struct node *n)
{
	struct node *map, *key;

	map = n->expr.args;
	if (!map->sym)
		return 0;

	if (map->sym->type) {
		if (!n->sym->type)
			/* given `m[key]` where m's type is known,
			 * infer that the expression's type is equal
			 * to m's value type. */
			n->sym->type = map->sym->type->map.vtype;

		return map_type_validate(n);
	}

	if (!n->sym->type)
		return 0;

	for (key = map->next; key; key = key->next) {
		if (type_sizeof(key->sym->type) < 0)
			return 0;
	}

	map->sym->type = type_map_of(map_key_type(n), n->sym->type);
	return 0;
}
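
/* Inference runs in both directions: a known map type determines the
 * expression's type, while a known value type plus fully sized key
 * types determines the map's type. Bailing out while any key size is
 * unknown presumably lets the driver retry on a later pass. */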

static int map_static_validate(const struct func *func, struct node *n)
{
	if (n->expr.args->ntype != N_IDENT) {
		_e("%#N: can't lookup a key in %N, which is not a map.\n",
		   n, n);
		return -EINVAL;
	}
	return 0;
}

/* :assign */

static int global_assign_ir_pre(const struct func *func, struct node *n,
				struct prog *prog)
{
	struct node *lval, *rval;

	lval = n->expr.args;
	rval = lval->next;

	n->sym->irs.hint.stack = 1;
	ir_init_irs(prog->ir, &n->sym->irs, lval->sym->type);

	lval->sym->irs.hint.lval = 1;
	lval->sym->irs.hint.stack = 1;
	lval->sym->irs.stack = n->sym->irs.stack;

	rval->sym->irs.hint.stack = 1;
	rval->sym->irs.stack = n->sym->irs.stack;
	return 0;
}

static int global_assign_ir_post(const struct func *func, struct node *n,
				 struct prog *prog)
{
	struct node *lval, *rval;

	lval = n->expr.args;
	rval = lval->next;

	ir_emit_sym_to_sym(prog->ir, lval->sym, rval->sym);

	if (!node_is(lval, "{}"))
		return 0;

	return map_ir_update(lval, prog);
}
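
/* Since the pre-pass steered lval, rval and the expression itself
 * into the same stack slot, the sym-to-sym copy can collapse into a
 * no-op; only map lvalues need the trailing map_update_elem() call. */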

static int global_assign_type_infer(const struct func *func, struct node *n)
{
	struct node *lval, *rval;
	int err;

	if (n->sym->type)
		return 0;

	lval = n->expr.args;
	rval = lval->next;
	if (!rval->sym->type)
		return 0;

	if (!lval->sym->type) {
		/* given `a = b` where b's type is known but not a's,
		 * infer that a's type must be equal to b's. */
		lval->sym->type = rval->sym->type;

		/* TODO: do we need assignment expressions? */
		n->sym->type = &t_void;

		if (!node_is(lval, "{}"))
			return 0;

		err = map_type_infer(lval->sym->func, lval);
		if (err)
			return err;
	}

	if (type_compatible(lval->sym->type, rval->sym->type))
		return 0;

	_e("%#N: can't assign %N (type '%T') to %N (type '%T').\n",
	   n, rval, rval->sym->type, lval, lval->sym->type);
	return -EINVAL;
}

static int global_assign_static_validate(const struct func *func, struct node *n)
{
	struct node *lval;

	lval = n->expr.args;
	if (node_is(lval, "{}") || (lval->ntype == N_IDENT))
		return 0;

	_e("%#N: can't assign a value to %N.\n", n, lval);
	return -EINVAL;
}

/* :binop */

static int global_binop_type_infer(const struct func *func, struct node *n)
{
	struct node *lval, *rval;

	if (n->sym->type)
		return 0;

	lval = n->expr.args;
	rval = lval->next;
	if (!lval->sym->type || !rval->sym->type)
		return 0;

	if (type_equal(lval->sym->type, rval->sym->type)) {
		n->sym->type = lval->sym->type;
		return 0;
	}

	/* TODO: handle integer promotion */
	return 0;
}

/* count() */

struct type t_count = {
	.ttype = T_SCALAR,
	.scalar = {
		.name = ":count",
		.size = sizeof(unsigned long),
		.is_signed = 0,
	},

	.aggregation = 1,
};

static int count_ir_post(const struct func *func, struct node *n,
			 struct prog *prog)
{
	struct node *mapop = n->expr.args;

	ir_emit_sym_to_reg(prog->ir, BPF_REG_0, mapop->sym);
	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, 1), BPF_REG_0, 0);
	ir_emit_reg_to_sym(prog->ir, mapop->sym, BPF_REG_0);
	return map_ir_update(mapop, prog);
}
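
/* count() thus compiles to a plain load, add-1, store, map-update
 * sequence. Note that this read-modify-write is not atomic, so
 * concurrent probes on other CPUs can in principle lose increments. */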

static int count_type_infer(const struct func *func, struct node *n)
{
	struct node *mapop;

	mapop = n->expr.args;
	if (n->sym->type)
		return 0;

	n->sym->type = &t_void;
	if (!mapop->sym->type) {
		mapop->sym->type = &t_count;
		return map_type_infer(mapop->sym->func, mapop);
	}
	return 0;
}

static int count_static_validate(const struct func *func, struct node *n)
{
	struct node *mapop;

	mapop = n->expr.args;
	if (node_is(mapop, "{}"))
		return 0;

	_e("%#N: aggregation target must be a map, %N is not.\n", n, mapop);
	return -EINVAL;
}

/* quantize */

static int global_quantize_type_infer(const struct func *func, struct node *n)
{
	struct node *arg;
	struct type *t;

	arg = n->expr.args;
	if (n->sym->type || !arg->sym->type)
		return 0;

	t = type_base(arg->sym->type);
	if (t->ttype != T_SCALAR) {
		_e("%#N: can't quantize non-scalar value %N (type '%T').\n",
		   n, arg, arg->sym->type);
		return -EINVAL;
	}

	n->sym->type = type_array_of(arg->sym->type, type_sizeof(t) * 8);
	return 0;
}
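
/* The result type has one array slot per bit of the input scalar
 * (e.g. 64 buckets for a u64), which matches a log2 histogram that
 * buckets each sample by its highest set bit. */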

/* pid */

static int global_pid_ir_post(const struct func *func, struct node *n,
			      struct prog *prog)
{
	ir_init_sym(prog->ir, n->sym);

	ir_emit_insn(prog->ir, CALL(BPF_FUNC_get_current_pid_tgid), 0, 0);
	ir_emit_insn(prog->ir, ALU64_IMM(BPF_RSH, 32), BPF_REG_0, 0);
	ir_emit_reg_to_sym(prog->ir, n->sym, BPF_REG_0);
	return 0;
}

struct type t_pid = {
	.ttype = T_TYPEDEF,
	.tdef = { .name = ":pid", .type = &t_u32 },
};

struct type t_pid_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_pid },
};
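
/* bpf_get_current_pid_tgid() packs the thread group id into the
 * upper 32 bits and the thread id into the lower 32, so the right
 * shift yields what userspace calls the PID; hence :pid being a
 * typedef of u32. */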

/* time */

static int global_time_ir_post(const struct func *func, struct node *n,
			       struct prog *prog)
{
	ir_init_sym(prog->ir, n->sym);

	ir_emit_insn(prog->ir, CALL(BPF_FUNC_ktime_get_ns), 0, 0);
	ir_emit_reg_to_sym(prog->ir, n->sym, BPF_REG_0);
	return 0;
}

struct type t_time = {
	/* TODO: should be a T_FUNC with a static signature */
	.ttype = T_TYPEDEF,
	.tdef = { .name = ":time", .type = &t_s64 },
};

struct type t_time_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_time },
};

/* */

struct type t_block_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_void, .vargs = 1 },
};

struct type t_string_array = {
	.ttype = T_ARRAY,
	.array = { .type = &t_char, .len = 64 },	/* TODO: tunable */
};

struct type t_string = {
	.ttype = T_TYPEDEF,
	.tdef = { .name = ":string", .type = &t_string_array },
};

struct tfield f_dot[] = {
	{ .type = &t_void },
	{ .type = &t_string },
	{ .type = NULL }
};

struct type t_dot_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_void, .args = f_dot },
};

struct tfield f_2args[] = {
	{ .type = &t_void },
	{ .type = &t_void },
	{ .type = NULL }
};

struct type t_2args_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_void, .args = f_2args },
};

struct tfield f_1arg[] = {
	{ .type = &t_void },
	{ .type = NULL }
};

struct type t_1arg_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_void, .args = f_1arg },
};

static const struct func global_funcs[] = {
	{
		.name = ":block",
		.type = &t_block_func,
		.static_ret = 1,
	},
	{
		.name = ".",
		.type = &t_dot_func,
		.type_infer = global_dot_type_infer,
		.ir_pre = global_dot_ir_pre,
		.ir_post = global_dot_ir_post,
	},
	{
		.name = ":deref",
		.type = &t_1arg_func,
		.type_infer = global_deref_type_infer,
		.ir_post = global_deref_ir_post,
	},
	{
		.name = "+",
		.type = &t_2args_func,
		.type_infer = global_binop_type_infer,
	},
	{
		.name = "-",
		.type = &t_2args_func,
		.type_infer = global_binop_type_infer,
	},
	{
		.name = "=",
		.type = &t_2args_func,
		.type_infer = global_assign_type_infer,
		.static_validate = global_assign_static_validate,
		.ir_pre = global_assign_ir_pre,
		.ir_post = global_assign_ir_post,
	},
	{
		.name = "{}",
		/* .type = t_map_func, */
		.type_infer = map_type_infer,
		.static_validate = map_static_validate,
		.ir_pre = map_ir_pre,
		.ir_post = map_ir_post,
	},
	{
		.name = "count",
		.type = &t_1arg_func,
		.static_validate = count_static_validate,
		.type_infer = count_type_infer,
		.ir_post = count_ir_post,
	},
	{
		.name = "pid",
		.type = &t_pid_func,
		.static_ret = 1,
		.ir_post = global_pid_ir_post,
	},
	{
		.name = "time",
		.type = &t_time_func,
		.static_ret = 1,
		.ir_post = global_time_ir_post,
	},
	{
		.name = "quantize",
		.type = &t_1arg_func,
		.type_infer = global_quantize_type_infer,
	},

	{ .name = NULL }
};
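
/* Each entry binds a construct to up to four hooks: static_validate
 * for one-shot checks at symbol allocation, type_infer (written to
 * be re-runnable, so the driver can iterate until types stop
 * changing) and ir_pre/ir_post around code generation for the
 * node's children. */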

static struct type *global_num_type(struct node *n)
{
	if (n->num.unsignd) {
		if (n->num.u64 <= INT_MAX)
			return &t_int;
		else if (n->num.u64 <= UINT_MAX)
			return &t_uint;
		else if (n->num.u64 <= LONG_MAX)
			return &t_long;
		else if (n->num.u64 <= ULONG_MAX)
			return &t_ulong;
		else if (n->num.u64 <= LLONG_MAX)
			return &t_llong;
		else if (n->num.u64 <= ULLONG_MAX)
			return &t_ullong;
	} else {
		if (n->num.s64 >= INT_MIN && n->num.s64 <= INT_MAX)
			return &t_int;
		else if (n->num.s64 >= LONG_MIN && n->num.s64 <= LONG_MAX)
			return &t_long;
		else if (n->num.s64 >= LLONG_MIN && n->num.s64 <= LLONG_MAX)
			return &t_llong;
	}

	assert(0);
	return NULL;
}

static int global_num_ir_post(const struct func *func, struct node *n,
			      struct prog *prog)
{
	struct irstate *irs = &n->sym->irs;

	if ((n->num.unsignd && (n->num.u64 <= INT32_MAX)) ||
	    (n->num.s64 >= INT32_MIN && n->num.s64 <= INT32_MAX)) {
		irs->loc = LOC_IMM;
		irs->imm = n->num.s64;
		irs->size = 4;
		return 0;
	}

	/* we need to build the constant in a register, so ignore any
	 * advice about stack allocation. */
	irs->hint.stack = 0;
	ir_init_sym(prog->ir, n->sym);

	if (n->num.u64 > 0x3fffffffffffffff) {
		ir_emit_insn(prog->ir, MOV64_IMM(n->num.u64 >> 33), irs->reg, 0);
		ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 31), irs->reg, 0);

		if ((n->num.u64 >> 2) & 0x7fffffff)
			ir_emit_insn(prog->ir,
				     ALU64_IMM(BPF_OR, (n->num.u64 >> 2) & 0x7fffffff),
				     irs->reg, 0);

		ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 2), irs->reg, 0);

		if (n->num.u64 & 0x3)
			ir_emit_insn(prog->ir, ALU64_IMM(BPF_OR, n->num.u64 & 0x3),
				     irs->reg, 0);
	} else if (n->num.u64 > 0x7fffffff) {
		ir_emit_insn(prog->ir, MOV64_IMM(n->num.u64 >> 31), irs->reg, 0);
		ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 31), irs->reg, 0);

		if (n->num.u64 & 0x7fffffff)
			ir_emit_insn(prog->ir,
				     ALU64_IMM(BPF_OR, n->num.u64 & 0x7fffffff),
				     irs->reg, 0);
	}
	return 0;
}
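
/* BPF ALU immediates are sign-extended 32-bit values, hence the
 * fast path for anything representable that way, and the shift/or
 * sequences (at most 31 usable bits per step, keeping every
 * immediate non-negative) for wider constants. */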

static const struct func global_num_func = {
	.name = ":num",
	.ir_post = global_num_ir_post,
};

static const struct func global_string_func = {
	.name = ":string",
	.type = &t_string,
	.static_ret = 1,
};

static const struct func global_ident_func = {
	.name = ":ident",
};

static const struct func *global_sym_alloc_expr(struct node *n)
{
	const struct func *func;

	for (func = global_funcs; func->name; func++) {
		if (strcmp(func->name, n->expr.func))
			continue;

		return func;
	}
	return NULL;
}

int global_sym_alloc(struct prog *prog, struct node *n)
{
	/* NULL so that node types not handled below fail with -ENOENT
	 * instead of reading an uninitialized pointer. */
	const struct func *func = NULL;
	struct symtab *st = prog->locals;
	int err;

	switch (n->ntype) {
	case N_EXPR:
		func = global_sym_alloc_expr(n);
		break;
	case N_IDENT:
		st = prog->globals;
		func = &global_ident_func;
		break;
	case N_NUM:
		func = &global_num_func;
		break;
	case N_STRING:
		func = &global_string_func;
		break;
	}

	if (!func)
		return -ENOENT;

	err = func_static_validate(func, n);
	if (err)
		return err;

	n->sym = sym_alloc(st, n, func);

	if (n->ntype == N_NUM)
		n->sym->type = global_num_type(n);
	else if (func->static_ret)
		n->sym->type = func_return_type(func);

	return 0;
}
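
/* Idents land in prog->globals rather than prog->locals, presumably
 * so that maps referenced from several probes resolve to one shared
 * symbol. */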

int global_probe(struct prog *prog)
{
	return 0;
}

struct provider global = {
	.name = ":",

	.sym_alloc = global_sym_alloc,
	.probe = global_probe,
};

__attribute__((constructor))
static void global_init(void)
{
	provider_register(&global);
}
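
Taken together, these handlers are what let a statement mixing maps
and aggregations compile. A hypothetical one-liner (the probe syntax
lives elsewhere in ply, so treat this as a sketch, not verified
input):

	m[pid] = count();

`pid` and `count` resolve through global_funcs, the map subscript
synthesizes m's key and value types, and the ir hooks lower the whole
statement to bpf_map_lookup_elem()/bpf_map_update_elem() calls.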