A dynamic tracer for Linux

global.c

#define _GNU_SOURCE	/* asprintf */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "arch.h"
#include "func.h"
#include "node.h"
#include "ply.h"
#include "sym.h"
#include "type.h"

/* . */
static int global_dot_ir_pre(const struct func *func, struct node *n,
			     struct prog *prog)
{
	struct node *sou = n->expr.args;

	if (node_is(sou, ":deref")) {
		/* (*ptr).member: if *ptr is not already loaded, let
		 * it know that we're only interested in one member. */
		sou->sym->irs.hint.dot = 1;

		/* This also means we need to put ourselves on the
		 * stack, since the data will be loaded via
		 * probe_read. */
		n->sym->irs.hint.stack = 1;
	}
	return 0;
}
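
/* Illustrative sketch (assumed expression, not from the source):
 * for `(*skb).len`, the walk visits `.` before its :deref child.
 * Setting hint.dot here lets the child skip loading the whole
 * struct; the dot then reads just the one member:
 *
 *   without hint:  probe_read(sizeof(*skb)) + load of .len
 *   with hint:     probe_read(sizeof(len)) only
 */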

static int global_dot_ir_post(const struct func *func, struct node *n,
			      struct prog *prog)
{
	struct node *sou, *member;
	struct irstate *dst;
	ssize_t offset;

	sou = n->expr.args;
	member = sou->next;
	dst = &n->sym->irs;

	ir_init_sym(prog->ir, n->sym);

	offset = type_offsetof(type_base(sou->sym->type), member->string.data);
	assert(offset >= 0);

	if (!sou->sym->irs.loc) {
		/* sou is a :deref which wasn't loaded by the child;
		 * just read the member we're interested in. */
		struct node *ptr = sou->expr.args;

		ir_emit_sym_to_reg(prog->ir, BPF_REG_3, ptr->sym);
		ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, offset), BPF_REG_3, 0);
		goto probe_read;
	}

	offset += sou->sym->irs.stack;

	if (dst->loc == LOC_REG) {
		ir_emit_insn(prog->ir, LDX(bpf_width(dst->size), offset),
			     dst->reg, BPF_REG_BP);
		return 0;
	}

	/* Point r3 at the member's stack slot before the copy; the
	 * frame pointer itself is read-only to BPF programs. */
	ir_emit_insn(prog->ir, MOV, BPF_REG_3, BPF_REG_BP);
	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, offset), BPF_REG_3, 0);

probe_read:
	ir_emit_insn(prog->ir, MOV_IMM((int32_t)dst->size), BPF_REG_2, 0);
	ir_emit_insn(prog->ir, MOV, BPF_REG_1, BPF_REG_BP);
	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, dst->stack), BPF_REG_1, 0);
	ir_emit_insn(prog->ir, CALL(BPF_FUNC_probe_read), 0, 0);
	/* TODO if (r0) exit(r0); */
	return 0;
}
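
/* Rough sketch of the three paths above. bpf_probe_read() takes
 * r1 = dst, r2 = size, r3 = (unsafe) src:
 *
 *   sou not loaded:   r3 = ptr + offset         -> probe_read
 *   dst in register:  ldx reg, [bp + offset]
 *   dst on stack:     r3 = bp + offset          -> probe_read
 */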

static int global_dot_type_infer(const struct func *func, struct node *n)
{
	struct node *sou, *member;
	struct type *t;
	struct tfield *f;

	if (n->sym->type)
		return 0;

	sou = n->expr.args;
	member = sou->next;
	if (!sou->sym->type)
		return 0;

	t = type_base(sou->sym->type);

	/* TODO: add union */
	if (t->ttype != T_STRUCT) {
		_e("%#N: %N is neither struct nor union (type '%T').\n",
		   n, sou, sou->sym->type);
		return -EINVAL;
	}

	f = tfields_get(t->sou.fields, member->string.data);
	if (!f) {
		_e("%#N: type '%T' has no member named %N.\n", n, t, member);
		return -EINVAL;
	}

	/* given `sou.member` where sou is a struct/union, infer that
	 * the expression's type is equal to the member's type. */
	n->sym->type = f->type;
	return 0;
}
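
/* Example (assumed, for illustration): with sou of type
 * `struct sk_buff` and member being "len", tfields_get() finds
 * the "len" field and the whole expression is typed as its
 * type, u32. */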

/* :deref */

static int global_deref_ir_post(const struct func *func, struct node *n,
				struct prog *prog)
{
	struct node *ptr = n->expr.args;
	struct irstate *dst;

	dst = &n->sym->irs;
	if (dst->hint.dot)
		/* (*ptr).member: ptr points to a struct and our
		 * parent is only interested in one member. Don't load
		 * the struct; let the dot operation steal the address
		 * from our argument. */
		return 0;

	ir_init_sym(prog->ir, n->sym);

	if (dst->hint.lval)
		/* *ptr = val: whatever is in our storage now will
		 * be overwritten, so skip the load. */
		return 0;

	ir_emit_sym_to_reg(prog->ir, BPF_REG_3, ptr->sym);
	ir_emit_read_to_sym(prog->ir, n->sym, BPF_REG_3);
	return 0;
}

static int global_deref_type_infer(const struct func *func, struct node *n)
{
	struct node *ptr = n->expr.args;
	struct type *t;

	if (n->sym->type || !ptr->sym->type)
		return 0;

	t = type_base(ptr->sym->type);
	if (t->ttype != T_POINTER) {
		_e("%#N: can't dereference %N (type '%T').\n",
		   n, ptr, ptr->sym->type);
		return -EINVAL;
	}

	/* given `*p` where p is a pointer, infer that the
	 * expression's type is equal to the type p points to. */
	n->sym->type = t->ptr.type;
	return 0;
}
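
/* Example (assumed, for illustration): if p has type
 * `struct task_struct *`, then `*p` is typed as
 * `struct task_struct`. */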

/* :map */

static int global_map_ir_pre_key(struct node *n, struct prog *prog)
{
	struct node *map = n->expr.args, *arg;
	struct type *ktype = type_base(map->sym->type->map.ktype);
	ssize_t stack = map->sym->irs.stack;
	size_t offset, size, pad;
	struct tfield *f;

	arg = map->next;
	tfields_foreach(f, ktype->sou.fields) {
		offset = type_offsetof(ktype, f->name);
		size = type_sizeof(f->type);

		if (!arg->sym->irs.loc) {
			arg->sym->irs.hint.stack = 1;
			arg->sym->irs.stack = stack + offset;
		}

		if (arg->next) {
			pad = type_offsetof(ktype, f[1].name) - (offset + size);
			if (pad)
				ir_emit_bzero(prog->ir,
					      stack + offset + size, pad);
		}

		arg = arg->next;
	}

	pad = type_sizeof(ktype) - (offset + size);
	if (pad)
		ir_emit_bzero(prog->ir, stack + offset + size, pad);
	return 0;
}
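
/* Why the bzero calls: the kernel compares map keys as raw
 * bytes, so any padding the layout introduces must be zeroed or
 * otherwise-identical keys could miss each other. A sketch,
 * assuming a key of struct { u8 k0; u64 k1; }:
 *
 *   offsetof(k0) = 0, sizeof = 1
 *   offsetof(k1) = 8, sizeof = 8  =>  bzero(stack + 1, 7)
 *
 * The layout is illustrative; the real numbers come from
 * type_offsetof()/type_sizeof(). */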

static int global_map_ir_pre(const struct func *func, struct node *n,
			     struct prog *prog)
{
	struct irstate *kirs;
	struct node *map = n->expr.args;
	struct type *ktype = type_base(map->sym->type->map.ktype);

	map->sym->irs.hint.stack = 1;
	ir_init_irs(prog->ir, &map->sym->irs, ktype);

	if (ktype->ttype == T_STRUCT)
		return global_map_ir_pre_key(n, prog);

	/* Single scalar key: have it written directly into the
	 * map's key slot on the stack. */
	kirs = &map->next->sym->irs;
	if (!kirs->loc) {
		kirs->hint.stack = 1;
		kirs->stack = map->sym->irs.stack;
	}
	return 0;
}

static int global_map_ir_post(const struct func *func, struct node *n,
			      struct prog *prog)
{
	struct node *map = n->expr.args, *arg;
	struct type *ktype = type_base(map->sym->type->map.ktype);
	ssize_t stack = map->sym->irs.stack;
	size_t offset;
	struct tfield *f;
	int16_t lmiss, lhit;

	arg = map->next;
	tfields_foreach(f, ktype->sou.fields) {
		offset = type_offsetof(ktype, f->name);
		ir_emit_sym_to_stack(prog->ir, stack + offset, arg->sym);
		arg = arg->next;
	}

	n->sym->irs.hint.stack = 1;
	ir_init_sym(prog->ir, n->sym);

	if (n->sym->irs.hint.lval)
		/* map[key] = val: whatever is in our storage now
		 * will be overwritten, so skip the load. */
		return 0;

	ir_emit_ldmap(prog->ir, BPF_REG_1, map->sym);
	ir_emit_insn(prog->ir, MOV, BPF_REG_2, BPF_REG_BP);
	/* Build the key address in r2; the frame pointer itself is
	 * read-only to BPF programs. */
	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, stack), BPF_REG_2, 0);
	ir_emit_insn(prog->ir, CALL(BPF_FUNC_map_lookup_elem), 0, 0);

	lmiss = ir_alloc_label(prog->ir);
	lhit = ir_alloc_label(prog->ir);

	ir_emit_insn(prog->ir, JMP_IMM(BPF_JEQ, 0, lmiss), BPF_REG_0, 0);
	ir_emit_read_to_sym(prog->ir, n->sym, BPF_REG_0);
	ir_emit_insn(prog->ir, JMP(BPF_JA, lhit), 0, 0);

	ir_emit_label(prog->ir, lmiss);
	ir_emit_bzero(prog->ir, n->sym->irs.stack, n->sym->irs.size);

	ir_emit_label(prog->ir, lhit);
	return 0;
}
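
/* Rough shape of the emitted lookup, in pseudo-assembly:
 *
 *       r1 = <map>
 *       r2 = bp + key_slot
 *       call map_lookup_elem
 *       if r0 == 0 goto miss
 *       <copy *r0 into n's storage>
 *       goto hit
 *   miss:
 *       <bzero n's storage>        ; missing keys read as zero
 *   hit:
 */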

static struct type *global_map_ktype(struct node *n)
{
	struct node *map, *key;
	struct type *ktype;
	struct tfield *kfields, *f;
	int i, nargs = node_nargs(n);

	map = n->expr.args;

	/* Single key: use its type directly. */
	if (nargs == 2)
		return map->next->sym->type;

	/* Multiple keys: synthesize a struct type holding them all. */
	ktype = calloc(1, sizeof(*ktype));
	assert(ktype);
	kfields = calloc(nargs, sizeof(*kfields));
	assert(kfields);

	for (key = map->next, f = kfields, i = 0; key;
	     key = key->next, f++, i++) {
		asprintf(&f->name, "k%d", i);
		f->type = key->sym->type;
	}

	asprintf(&ktype->sou.name, ":%s_key", map->ident.name);
	ktype->ttype = T_STRUCT;
	ktype->sou.fields = kfields;
	type_add(ktype);
	return ktype;
}
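
/* Example (assumed ply syntax, for illustration): for
 * `m[pid, cpu]` the two key expressions become a synthetic
 * `struct :m_key` with fields k0 and k1. Note that nargs counts
 * the map node itself, so calloc(nargs, ...) leaves one zeroed
 * tfield as the NULL terminator, matching f_dot et al. below. */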

static int global_map_type_infer(const struct func *func, struct node *n)
{
	struct node *map = n->expr.args;

	if (!map->sym->type)
		return 0;

	/* TODO validate key against known type */

	/* given `m[key]` where m's type is known, infer that the
	 * expression's type is equal to m's value type. */
	n->sym->type = map->sym->type->map.vtype;
	return 0;
}

static int global_map_static_validate(const struct func *func, struct node *n)
{
	if (n->expr.args->ntype != N_IDENT) {
		_e("%#N: can't look up a key in %N, which is not a map.\n",
		   n, n->expr.args);
		return -EINVAL;
	}
	return 0;
}

/* :assign */

static int global_assign_ir_pre(const struct func *func, struct node *n,
				struct prog *prog)
{
	struct node *lval, *rval;

	lval = n->expr.args;
	rval = lval->next;

	n->sym->irs.hint.stack = 1;
	ir_init_irs(prog->ir, &n->sym->irs, lval->sym->type);

	lval->sym->irs.hint.lval = 1;
	lval->sym->irs.hint.stack = 1;
	lval->sym->irs.stack = n->sym->irs.stack;

	rval->sym->irs.hint.stack = 1;
	rval->sym->irs.stack = n->sym->irs.stack;
	return 0;
}

static int global_assign_ir_post(const struct func *func, struct node *n,
				 struct prog *prog)
{
	struct node *lval, *rval, *map;

	lval = n->expr.args;
	rval = lval->next;

	ir_emit_sym_to_sym(prog->ir, lval->sym, rval->sym);

	if (!node_is(lval, "{}"))
		return 0;

	/* Assigning to a map element: flush the new value to the
	 * kernel via map_update_elem. */
	map = lval->expr.args;
	ir_emit_ldmap(prog->ir, BPF_REG_1, map->sym);
	ir_emit_insn(prog->ir, MOV, BPF_REG_2, BPF_REG_BP);
	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, map->sym->irs.stack), BPF_REG_2, 0);
	ir_emit_insn(prog->ir, MOV, BPF_REG_3, BPF_REG_BP);
	ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, lval->sym->irs.stack), BPF_REG_3, 0);
	ir_emit_insn(prog->ir, MOV_IMM(0), BPF_REG_4, 0);
	ir_emit_insn(prog->ir, CALL(BPF_FUNC_map_update_elem), 0, 0);
	/* TODO: if (r0) exit(r0); */
	return 0;
}
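
/* The register setup above follows bpf_map_update_elem():
 * r1 = map, r2 = key, r3 = value, r4 = flags. Flags 0 is
 * BPF_ANY, i.e. create the element or update it in place. */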

static int global_assign_type_infer_map(struct node *n)
{
	struct node *map, *key;

	map = n->expr.args;

	/* Wait until the sizes of all key components are known. */
	for (key = map->next; key; key = key->next) {
		if (type_sizeof(key->sym->type) < 0)
			return 0;
	}

	map->sym->type = type_map_of(global_map_ktype(n), n->sym->type);
	return 0;
}

static int global_assign_type_infer(const struct func *func, struct node *n)
{
	struct node *lval, *rval;

	if (n->sym->type)
		return 0;

	lval = n->expr.args;
	rval = lval->next;
	if (!rval->sym->type)
		return 0;

	if (!lval->sym->type) {
		/* given `a = b` where b's type is known but not a's,
		 * infer that a's type must be equal to b's. */
		lval->sym->type = rval->sym->type;

		/* TODO do we need assignment expressions? */
		n->sym->type = &t_void;

		if (node_is(lval, "{}"))
			return global_assign_type_infer_map(lval);
		return 0;
	}

	if (type_compatible(lval->sym->type, rval->sym->type))
		return 0;

	_e("%#N: can't assign %N (type '%T') to %N (type '%T').\n",
	   n, rval, rval->sym->type, lval, lval->sym->type);
	return -EINVAL;
}
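
/* Example (assumed ply syntax, for illustration) of how types
 * flow through `c[pid] = time`: the rvalue is statically typed
 * :time (s64), so the map element c[pid] becomes s64, the
 * assignment itself is void, and the map branch then builds c's
 * map type from the key and value types. */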

static int global_assign_static_validate(const struct func *func, struct node *n)
{
	struct node *lval;

	lval = n->expr.args;
	if (node_is(lval, "{}") || (lval->ntype == N_IDENT))
		return 0;

	_e("%#N: can't assign a value to %N.\n", n, lval);
	return -EINVAL;
}

/* :binop */

static int global_binop_type_infer(const struct func *func, struct node *n)
{
	struct node *lval, *rval;

	if (n->sym->type)
		return 0;

	lval = n->expr.args;
	rval = lval->next;
	if (!lval->sym->type || !rval->sym->type)
		return 0;

	if (type_equal(lval->sym->type, rval->sym->type)) {
		n->sym->type = lval->sym->type;
		return 0;
	}

	/* TODO handle integer promotion */
	return 0;
}
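
/* Example (assumed, for illustration): `time - t0` where both
 * sides are s64 infers s64 for the whole expression. Operands of
 * mixed types are left untyped until the TODO above lands. */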

/* quantize */

static int global_quantize_type_infer(const struct func *func, struct node *n)
{
	struct node *arg;
	struct type *t;

	arg = n->expr.args;
	if (n->sym->type || !arg->sym->type)
		return 0;

	t = type_base(arg->sym->type);
	if (t->ttype != T_SCALAR) {
		_e("%#N: can't quantize non-scalar value %N (type '%T').\n",
		   n, arg, arg->sym->type);
		return -EINVAL;
	}

	/* One bucket per bit of the argument's type. */
	n->sym->type = type_array_of(arg->sym->type, type_sizeof(t) * 8);
	return 0;
}
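
/* Example (for illustration): quantizing a u64 yields u64[64],
 * i.e. one bucket per bit position, so each bucket can count
 * values falling in one power-of-two interval. */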

/* pid */

static int global_pid_ir_post(const struct func *func, struct node *n,
			      struct prog *prog)
{
	ir_init_sym(prog->ir, n->sym);

	ir_emit_insn(prog->ir, CALL(BPF_FUNC_get_current_pid_tgid), 0, 0);
	/* The upper 32 bits hold the tgid, i.e. what userspace
	 * calls the PID. */
	ir_emit_insn(prog->ir, ALU64_IMM(BPF_RSH, 32), BPF_REG_0, 0);
	ir_emit_reg_to_sym(prog->ir, n->sym, BPF_REG_0);
	return 0;
}

struct type t_pid = {
	.ttype = T_TYPEDEF,
	.tdef = { .name = ":pid", .type = &t_u32 },
};

struct type t_pid_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_pid },
};

/* time */

static int global_time_ir_post(const struct func *func, struct node *n,
			       struct prog *prog)
{
	ir_init_sym(prog->ir, n->sym);

	ir_emit_insn(prog->ir, CALL(BPF_FUNC_ktime_get_ns), 0, 0);
	ir_emit_reg_to_sym(prog->ir, n->sym, BPF_REG_0);
	return 0;
}

struct type t_time = {
	.ttype = T_TYPEDEF,	/* TODO: should be a T_FUNC with a
				 * static signature */
	.tdef = { .name = ":time", .type = &t_s64 },
};

struct type t_time_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_time },
};

/* generic types */

struct type t_block_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_void, .vargs = 1 },
};

struct type t_string_array = {
	.ttype = T_ARRAY,
	.array = { .type = &t_char, .len = 64 },	/* TODO: tunable */
};

struct type t_string = {
	.ttype = T_TYPEDEF,
	.tdef = { .name = ":string", .type = &t_string_array },
};

struct tfield f_dot[] = {
	{ .type = &t_void },
	{ .type = &t_string },
	{ .type = NULL }
};

struct type t_dot_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_void, .args = f_dot },
};

struct tfield f_2args[] = {
	{ .type = &t_void },
	{ .type = &t_void },
	{ .type = NULL }
};

struct type t_2args_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_void, .args = f_2args },
};

struct tfield f_1arg[] = {
	{ .type = &t_void },
	{ .type = NULL }
};

struct type t_1arg_func = {
	.ttype = T_FUNC,
	.func = { .type = &t_void, .args = f_1arg },
};

static const struct func global_funcs[] = {
	{
		.name = ":block",
		.type = &t_block_func,
		.static_ret = 1,
	},
	{
		.name = ".",
		.type = &t_dot_func,
		.type_infer = global_dot_type_infer,
		.ir_pre  = global_dot_ir_pre,
		.ir_post = global_dot_ir_post,
	},
	{
		.name = ":deref",
		.type = &t_1arg_func,
		.type_infer = global_deref_type_infer,
		.ir_post = global_deref_ir_post,
	},
	{
		.name = "+",
		.type = &t_2args_func,
		.type_infer = global_binop_type_infer,
	},
	{
		.name = "-",
		.type = &t_2args_func,
		.type_infer = global_binop_type_infer,
	},
	{
		.name = "=",
		.type = &t_2args_func,
		.type_infer = global_assign_type_infer,
		.static_validate = global_assign_static_validate,
		.ir_pre  = global_assign_ir_pre,
		.ir_post = global_assign_ir_post,
	},
	{
		.name = "{}",
		/* .type = t_map_func, */
		.type_infer = global_map_type_infer,
		.static_validate = global_map_static_validate,
		.ir_pre  = global_map_ir_pre,
		.ir_post = global_map_ir_post,
	},
	{
		.name = "pid",
		.type = &t_pid_func,
		.static_ret = 1,
		.ir_post = global_pid_ir_post,
	},
	{
		.name = "time",
		.type = &t_time_func,
		.static_ret = 1,
		.ir_post = global_time_ir_post,
	},
	{
		.name = "quantize",
		.type = &t_1arg_func,
		.type_infer = global_quantize_type_infer,
	},

	{ .name = NULL }
};

static struct type *global_num_type(struct node *n)
{
	if (n->num.unsignd) {
		if (n->num.u64 <= INT_MAX)
			return &t_int;
		else if (n->num.u64 <= UINT_MAX)
			return &t_uint;
		else if (n->num.u64 <= LONG_MAX)
			return &t_long;
		else if (n->num.u64 <= ULONG_MAX)
			return &t_ulong;
		else if (n->num.u64 <= LLONG_MAX)
			return &t_llong;
		else if (n->num.u64 <= ULLONG_MAX)
			return &t_ullong;
	} else {
		if (n->num.s64 >= INT_MIN && n->num.s64 <= INT_MAX)
			return &t_int;
		else if (n->num.s64 >= LONG_MIN && n->num.s64 <= LONG_MAX)
			return &t_long;
		else if (n->num.s64 >= LLONG_MIN && n->num.s64 <= LLONG_MAX)
			return &t_llong;
	}

	assert(0);
	return NULL;
}
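
/* The literal gets the smallest type that can hold it. Example
 * (assuming an LP64 target, for illustration):
 *
 *   100          -> int   (fits in INT_MAX)
 *   5000000000   -> long  (exceeds INT_MAX, fits in LONG_MAX)
 */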

static int global_num_ir_post(const struct func *func, struct node *n,
			      struct prog *prog)
{
	struct irstate *irs = &n->sym->irs;

	if ((n->num.unsignd && (n->num.u64 <= INT32_MAX)) ||
	    (n->num.s64 >= INT32_MIN && n->num.s64 <= INT32_MAX)) {
		irs->loc = LOC_IMM;
		irs->imm = n->num.s64;
		irs->size = 4;
		return 0;
	}

	/* we need to build the constant in a register, so ignore any
	 * advice about stack allocation. */
	irs->hint.stack = 0;
	ir_init_sym(prog->ir, n->sym);

	if (n->num.u64 > 0x3fffffffffffffff) {
		ir_emit_insn(prog->ir, MOV64_IMM(n->num.u64 >> 33), irs->reg, 0);
		ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 31), irs->reg, 0);
		if ((n->num.u64 >> 2) & 0x7fffffff)
			ir_emit_insn(prog->ir,
				     ALU64_IMM(BPF_OR, (n->num.u64 >> 2) & 0x7fffffff),
				     irs->reg, 0);
		ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 2), irs->reg, 0);
		if (n->num.u64 & 0x3)
			ir_emit_insn(prog->ir, ALU64_IMM(BPF_OR, n->num.u64 & 0x3),
				     irs->reg, 0);
	} else if (n->num.u64 > 0x7fffffff) {
		ir_emit_insn(prog->ir, MOV64_IMM(n->num.u64 >> 31), irs->reg, 0);
		ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 31), irs->reg, 0);
		if (n->num.u64 & 0x7fffffff)
			ir_emit_insn(prog->ir,
				     ALU64_IMM(BPF_OR, n->num.u64 & 0x7fffffff),
				     irs->reg, 0);
	}
	return 0;
}
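
/* Why the shifting above: BPF ALU immediates are 32-bit and
 * sign-extended, so only values that survive a 32-bit sign
 * extension can be loaded in one MOV; larger constants are
 * assembled from 31-bit chunks. Sketch for v = 1 << 32, which
 * takes the second branch:
 *
 *   mov  r, v >> 31        ; r = 2
 *   lsh  r, 31             ; r = 0x100000000
 *   or   r, v & 0x7fffffff ; low bits are zero, insn elided
 */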

static const struct func global_num_func = {
	.name = ":num",
	.ir_post = global_num_ir_post,
};

static const struct func global_string_func = {
	.name = ":string",
	.type = &t_string,
	.static_ret = 1,
};

static const struct func global_ident_func = {
	.name = ":ident",
};

static const struct func *global_sym_alloc_expr(struct node *n)
{
	const struct func *func;

	for (func = global_funcs; func->name; func++) {
		if (strcmp(func->name, n->expr.func))
			continue;

		return func;
	}
	return NULL;
}

int global_sym_alloc(struct prog *prog, struct node *n)
{
	const struct func *func = NULL;
	struct symtab *st = prog->locals;
	int err;

	switch (n->ntype) {
	case N_EXPR:
		func = global_sym_alloc_expr(n);
		break;
	case N_IDENT:
		/* identifiers live in the global symbol table */
		st = prog->globals;
		func = &global_ident_func;
		break;
	case N_NUM:
		func = &global_num_func;
		break;
	case N_STRING:
		func = &global_string_func;
		break;
	}

	if (!func)
		return -ENOENT;

	err = func_static_validate(func, n);
	if (err)
		return err;

	n->sym = sym_alloc(st, n, func);

	if (n->ntype == N_NUM)
		n->sym->type = global_num_type(n);
	else if (func->static_ret)
		n->sym->type = func_return_type(func);
	return 0;
}

int global_probe(struct prog *prog)
{
	return 0;
}

struct provider global = {
	.name = ":",
	.sym_alloc = global_sym_alloc,
	.probe = global_probe,
};

__attribute__((constructor))
static void global_init(void)
{
	provider_register(&global);
}