- #define _GNU_SOURCE /* asprintf */
- #include <assert.h>
- #include <errno.h>
- #include <limits.h>
- #include <stdlib.h>
- #include <string.h>
- #include "arch.h"
- #include "func.h"
- #include "node.h"
- #include "ply.h"
- #include "sym.h"
- #include "type.h"
- /* . */
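- /* pre-pass for `sou.member`: if the struct is reached through a
- * dereference, hint that only a single member is wanted and that the
- * result must live on the stack, since it will be filled in by
- * probe_read. */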
- static int global_dot_ir_pre(const struct func *func, struct node *n,
- struct prog *prog)
- {
- struct node *sou = n->expr.args;
- if (node_is(sou, ":deref")) {
- /* (*ptr).member: if *ptr has not already been loaded, let it
- * know that we're only interested in a single member */
- sou->sym->irs.hint.dot = 1;
- /* this also means we need to put ourselves on the
- * stack since data will be loaded via probe_read */
- n->sym->irs.hint.stack = 1;
- }
- return 0;
- }
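- /* post-pass for `sou.member`: compute the member offset and load the
- * member, either directly from the struct that is already on the
- * stack or with a probe_read through the pointer. */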
- static int global_dot_ir_post(const struct func *func, struct node *n,
- struct prog *prog)
- {
- struct node *sou, *member;
- struct irstate *dst;
- ssize_t offset;
- sou = n->expr.args;
- member = sou->next;
- dst = &n->sym->irs;
-
- ir_init_sym(prog->ir, n->sym);
- offset = type_offsetof(type_base(sou->sym->type), member->string.data);
- assert(offset >= 0);
- if (!sou->sym->irs.loc) {
- /* sou is a :deref which wasn't loaded by child, just
- * read the member we're interested in. */
- struct node *ptr = sou->expr.args;
- ir_emit_sym_to_reg(prog->ir, BPF_REG_3, ptr->sym);
- ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, offset), BPF_REG_3, 0);
- goto probe_read;
- }
- offset += sou->sym->irs.stack;
- if (dst->loc == LOC_REG) {
- ir_emit_insn(prog->ir, LDX(bpf_width(dst->size), offset),
- dst->reg, BPF_REG_BP);
- return 0;
- }
- ir_emit_insn(prog->ir, MOV, BPF_REG_3, BPF_REG_BP);
- ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, offset), BPF_REG_3, 0);
- probe_read:
- ir_emit_insn(prog->ir, MOV_IMM((int32_t)dst->size), BPF_REG_2, 0);
- ir_emit_insn(prog->ir, MOV, BPF_REG_1, BPF_REG_BP);
- ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, dst->stack), BPF_REG_1, 0);
- ir_emit_insn(prog->ir, CALL(BPF_FUNC_probe_read), 0, 0);
- /* TODO if (r0) exit(r0); */
- return 0;
- }
- static int global_dot_type_infer(const struct func *func, struct node *n)
- {
- struct node *sou, *member;
- struct type *t;
- struct tfield *f;
- if (n->sym->type)
- return 0;
- sou = n->expr.args;
- member = sou->next;
- if (!sou->sym->type)
- return 0;
- t = type_base(sou->sym->type);
- /* TODO: add union */
- if (t->ttype != T_STRUCT) {
- _e("%#N: %N is neither struct nor union (type '%T').\n",
- n, sou, sou->sym->type);
- return -EINVAL;
- }
- f = tfields_get(t->sou.fields, member->string.data);
- if (!f) {
- _e("%#N: type '%T' has no member named %N.\n", n, t, member);
- return -EINVAL;
- }
- /* given `sou.member` where sou is a struct/union, infer that
- * the expression's type is equal to member's type. */
- n->sym->type = f->type;
- return 0;
- }
- /* :deref */
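- /* post-pass for `*ptr`: unless the parent dot operation or an
- * assignment target makes the load unnecessary, load the pointer into
- * a register and read the pointed-to data into this node's storage. */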
- static int global_deref_ir_post(const struct func *func, struct node *n,
- struct prog *prog)
- {
- struct node *ptr = n->expr.args;
- struct irstate *dst;
- dst = &n->sym->irs;
- if (dst->hint.dot)
- /* (*ptr).member, ptr points to a struct and our
- * parent is only interested in one member. don't load
- * the struct, let the dot operation steal the address
- * from our argument */
- return 0;
- ir_init_sym(prog->ir, n->sym);
- if (dst->hint.lval)
- /* *ptr = val: whatever is in our storage now will
- * be overwritten, so skip the load. */
- return 0;
- ir_emit_sym_to_reg(prog->ir, BPF_REG_3, ptr->sym);
- ir_emit_read_to_sym(prog->ir, n->sym, BPF_REG_3);
- return 0;
- }
- static int global_deref_type_infer(const struct func *func, struct node *n)
- {
- struct node *ptr = n->expr.args;
- struct type *t;
- if (n->sym->type || !ptr->sym->type)
- return 0;
- t = type_base(ptr->sym->type);
- if (t->ttype != T_POINTER) {
- _e("%#N: can't dereference %N (type '%T').\n",
- n, ptr, ptr->sym->type);
- return -EINVAL;
- }
- /* given `*p` where p is a pointer, infer that the
- * expression's type is the type that p points to. */
- n->sym->type = t->ptr.type;
- return 0;
- }
- /* :map */
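- /* write the value stored at n's stack slot into the map, using the
- * key already packed at the map symbol's stack slot. called once an
- * assignment or aggregation has produced the new value. */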
- static int map_ir_update(struct node *n, struct prog *prog)
- {
- struct node *map = n->expr.args;
- ir_emit_ldmap(prog->ir, BPF_REG_1, map->sym);
- ir_emit_insn(prog->ir, MOV, BPF_REG_2, BPF_REG_BP);
- ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, map->sym->irs.stack), BPF_REG_2, 0);
- ir_emit_insn(prog->ir, MOV, BPF_REG_3, BPF_REG_BP);
- ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, n->sym->irs.stack), BPF_REG_3, 0);
- ir_emit_insn(prog->ir, MOV_IMM(0), BPF_REG_4, 0);
- ir_emit_insn(prog->ir, CALL(BPF_FUNC_map_update_elem), 0, 0);
- /* TODO: if (r0) exit(r0); */
- return 0;
- }
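- /* multi-key lookups use a synthesized struct as the key. reserve
- * stack space for each argument at its field offset and zero any
- * padding in between, so that padding bytes never make otherwise
- * equal keys compare as different. */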
- static int map_ir_pre_key(struct node *n, struct prog *prog)
- {
- struct node *map = n->expr.args, *arg;
- struct type *ktype = type_base(map->sym->type->map.ktype);
- ssize_t stack = map->sym->irs.stack;
- size_t offset, size, pad;
- struct tfield *f;
- arg = map->next;
- tfields_foreach(f, ktype->sou.fields) {
- offset = type_offsetof(ktype, f->name);
- size = type_sizeof(f->type);
- if (!arg->sym->irs.loc) {
- arg->sym->irs.hint.stack = 1;
- arg->sym->irs.stack = stack + offset;
- }
- if (arg->next) {
- pad = type_offsetof(ktype, f[1].name) - (offset + size);
- if (pad)
- ir_emit_bzero(prog->ir,
- stack + offset + size, pad);
- }
- arg = arg->next;
- }
- pad = type_sizeof(ktype) - (offset + size);
- if (pad)
- ir_emit_bzero(prog->ir, stack + offset + size, pad);
- return 0;
- }
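- /* reserve stack space for the key before the arguments are
- * evaluated, steering them to write their results directly into the
- * key area when possible. */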
- static int map_ir_pre(const struct func *func, struct node *n,
- struct prog *prog)
- {
- struct irstate *kirs;
- struct node *map = n->expr.args;
- struct type *ktype = type_base(map->sym->type->map.ktype);
- map->sym->irs.hint.stack = 1;
- ir_init_irs(prog->ir, &map->sym->irs, ktype);
- if (ktype->ttype == T_STRUCT)
- return map_ir_pre_key(n, prog);
- kirs = &map->next->sym->irs;
- if (!kirs->loc) {
- kirs->hint.stack = 1;
- kirs->stack = map->sym->irs.stack;
- }
- return 0;
- }
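- /* copy the key argument(s) into the key area on the stack and look
- * the key up. unless the lookup is the target of an assignment, the
- * value is read into this node's stack storage on a hit, or zeroed
- * on a miss. */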
- static int map_ir_post(const struct func *func, struct node *n,
- struct prog *prog)
- {
- struct node *map = n->expr.args, *arg;
- struct type *ktype = type_base(map->sym->type->map.ktype);
- ssize_t stack = map->sym->irs.stack;
- size_t offset;
- struct tfield *f;
- int16_t lmiss, lhit;
- arg = map->next;
- if (ktype->ttype == T_STRUCT) {
- tfields_foreach(f, ktype->sou.fields) {
- offset = type_offsetof(ktype, f->name);
- ir_emit_sym_to_stack(prog->ir, stack + offset, arg->sym);
- arg = arg->next;
- }
- } else {
- ir_emit_sym_to_stack(prog->ir, stack, arg->sym);
- assert(!arg->next);
- }
- n->sym->irs.hint.stack = 1;
- ir_init_sym(prog->ir, n->sym);
- if (n->sym->irs.hint.lval)
- /* map[key] = val: whatever is in our storage now
- * will be overwritten, so skip the load. */
- return 0;
- ir_emit_ldmap(prog->ir, BPF_REG_1, map->sym);
- ir_emit_insn(prog->ir, MOV, BPF_REG_2, BPF_REG_BP);
- ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, stack), BPF_REG_2, 0);
- ir_emit_insn(prog->ir, CALL(BPF_FUNC_map_lookup_elem), 0, 0);
- lmiss = ir_alloc_label(prog->ir);
- lhit = ir_alloc_label(prog->ir);
- ir_emit_insn(prog->ir, JMP_IMM(BPF_JEQ, 0, lmiss), BPF_REG_0, 0);
- ir_emit_read_to_sym(prog->ir, n->sym, BPF_REG_0);
- ir_emit_insn(prog->ir, JMP(BPF_JA, lhit), 0, 0);
- ir_emit_label(prog->ir, lmiss);
- ir_emit_bzero(prog->ir, n->sym->irs.stack, n->sym->irs.size);
-
- ir_emit_label(prog->ir, lhit);
- return 0;
- }
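- /* a single key argument uses its own type as the key type. multiple
- * keys are packed into an anonymous struct with one field per
- * argument. */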
- static struct type *map_key_type(struct node *n)
- {
- struct node *map, *key;
- struct type *ktype;
- struct tfield *kfields, *f;
- int i, nargs = node_nargs(n);
- char *kname;
- map = n->expr.args;
- if (nargs == 2)
- return map->next->sym->type;
- ktype = calloc(1, sizeof(*ktype));
- assert(ktype);
- kfields = calloc(nargs, sizeof(*kfields));
- assert(kfields);
- for (key = map->next, f = kfields, i = 0; key; key = key->next, f++, i++) {
- asprintf(&f->name, "k%d", i);
- f->type = key->sym->type;
- }
- asprintf(&ktype->sou.name, ":%s_key", map->ident.name);
- ktype->ttype = T_STRUCT;
- ktype->sou.fields = kfields;
- type_add(ktype);
- return ktype;
- }
- static int map_type_validate(struct node *n)
- {
- /* TODO */
- return 0;
- }
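- /* infer in both directions: a known map type gives the expression its
- * value type, while a known expression type (e.g. from an assignment)
- * together with known key types gives the map its type. */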
- static int map_type_infer(const struct func *func, struct node *n)
- {
- struct node *map, *key;
- struct type *ktype;
- map = n->expr.args;
- if (!map->sym)
- return 0;
- if (map->sym->type) {
- if (!n->sym->type)
- /* given `m[key]` where m's type is known,
- * infer that the expression's type is equal
- * to m's value type. */
- n->sym->type = map->sym->type->map.vtype;
- return map_type_validate(n);
- }
- if (!n->sym->type)
- return 0;
- for (key = map->next; key; key = key->next) {
- if (type_sizeof(key->sym->type) < 0)
- return 0;
- }
- map->sym->type = type_map_of(map_key_type(n), n->sym->type);
- return 0;
- }
- static int map_static_validate(const struct func *func, struct node *n)
- {
- if (n->expr.args->ntype != N_IDENT) {
- _e("%#N: can't lookup a key in %N, which is not a map.\n",
- n, n);
- return -EINVAL;
- }
- return 0;
- }
- /* :assign */
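- /* let both sides of the assignment share this node's stack storage,
- * so that evaluating the rvalue writes the new value directly into
- * the lvalue's slot. */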
- static int global_assign_ir_pre(const struct func *func, struct node *n,
- struct prog *prog)
- {
- struct node *lval, *rval;
- lval = n->expr.args;
- rval = lval->next;
- n->sym->irs.hint.stack = 1;
- ir_init_irs(prog->ir, &n->sym->irs, lval->sym->type);
- lval->sym->irs.hint.lval = 1;
- lval->sym->irs.hint.stack = 1;
- lval->sym->irs.stack = n->sym->irs.stack;
- rval->sym->irs.hint.stack = 1;
- rval->sym->irs.stack = n->sym->irs.stack;
- return 0;
- }
- static int global_assign_ir_post(const struct func *func, struct node *n,
- struct prog *prog)
- {
- struct node *lval, *rval;
- lval = n->expr.args;
- rval = lval->next;
- ir_emit_sym_to_sym(prog->ir, lval->sym, rval->sym);
- if (!node_is(lval, "{}"))
- return 0;
- return map_ir_update(lval, prog);
- }
- static int global_assign_type_infer(const struct func *func, struct node *n)
- {
- struct node *lval, *rval;
- int err;
- if (n->sym->type)
- return 0;
- lval = n->expr.args;
- rval = lval->next;
- if (!rval->sym->type)
- return 0;
- if (!lval->sym->type) {
- /* given `a = b` where b's type is known but not a's,
- * infer that a's type must be equal to b's */
- lval->sym->type = rval->sym->type;
- /* TODO do we need assignment expressions? */
- n->sym->type = &t_void;
-
- if (!node_is(lval, "{}"))
- return 0;
- err = map_type_infer(lval->sym->func, lval);
- if (err)
- return err;
- }
- if (type_compatible(lval->sym->type, rval->sym->type))
- return 0;
- _e("%#N: can't assign %N (type '%T'), to %N (type '%T').\n",
- n, rval, rval->sym->type, lval, lval->sym->type);
- return -EINVAL;
- }
- static int global_assign_static_validate(const struct func *func, struct node *n)
- {
- struct node *lval;
- lval = n->expr.args;
- if (node_is(lval, "{}") || (lval->ntype == N_IDENT))
- return 0;
- _e("%#N: can't assign a value to %N.\n", n, lval);
- return -EINVAL;
- }
- /* :binop */
- static int global_binop_type_infer(const struct func *func, struct node *n)
- {
- struct node *lval, *rval;
- if (n->sym->type)
- return 0;
- lval = n->expr.args;
- rval = lval->next;
- if (!lval->sym->type || !rval->sym->type)
- return 0;
- if (type_equal(lval->sym->type, rval->sym->type)) {
- n->sym->type = lval->sym->type;
- return 0;
- }
- /* TODO handle integer promotion */
- return 0;
- }
- /* count() */
- struct type t_count = {
- .ttype = T_SCALAR,
- .scalar = {
- .name = ":count",
- .size = sizeof(unsigned long),
- .is_signed = 0,
- },
- .aggregation = 1,
- };
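- /* load the current count from the map cell designated by the
- * argument, add one and write it back to the map. */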
- static int count_ir_post(const struct func *func, struct node *n,
- struct prog *prog)
- {
- struct node *mapop = n->expr.args;
- ir_emit_sym_to_reg(prog->ir, BPF_REG_0, mapop->sym);
- ir_emit_insn(prog->ir, ALU_IMM(BPF_ADD, 1), BPF_REG_0, 0);
- ir_emit_reg_to_sym(prog->ir, mapop->sym, BPF_REG_0);
- return map_ir_update(mapop, prog);
- }
- static int count_type_infer(const struct func *func, struct node *n)
- {
- struct node *mapop;
- mapop = n->expr.args;
- if (n->sym->type)
- return 0;
- n->sym->type = &t_void;
- if (!mapop->sym->type) {
- mapop->sym->type = &t_count;
- return map_type_infer(mapop->sym->func, mapop);
- }
- return 0;
- }
- static int count_static_validate(const struct func *func, struct node *n)
- {
- struct node *mapop;
- mapop = n->expr.args;
- if (node_is(mapop, "{}"))
- return 0;
- _e("%#N: aggregation target must be a map, %N is not.\n", n, mapop);
- return -EINVAL;
- }
- /* quantize */
- static int global_quantize_type_infer(const struct func *func, struct node *n)
- {
- struct node *arg;
- struct type *t;
- arg = n->expr.args;
- if (n->sym->type || !arg->sym->type)
- return 0;
- t = type_base(arg->sym->type);
- if (t->ttype != T_SCALAR) {
- _e("%#N: can't quantize non-scalar value %N (type '%T').\n",
- n, arg, arg->sym->type);
- return -EINVAL;
- }
- n->sym->type = type_array_of(arg->sym->type, type_sizeof(t) * 8);
- return 0;
- }
- /* pid */
- static int global_pid_ir_post(const struct func *func, struct node *n,
- struct prog *prog)
- {
- ir_init_sym(prog->ir, n->sym);
- ir_emit_insn(prog->ir, CALL(BPF_FUNC_get_current_pid_tgid), 0, 0);
- ir_emit_insn(prog->ir, ALU64_IMM(BPF_RSH, 32), BPF_REG_0, 0);
- ir_emit_reg_to_sym(prog->ir, n->sym, BPF_REG_0);
- return 0;
- }
- struct type t_pid = {
- .ttype = T_TYPEDEF,
- .tdef = { .name = ":pid", .type = &t_u32 },
- };
- struct type t_pid_func = {
- .ttype = T_FUNC,
- .func = { .type = &t_pid },
- };
- /* time */
- static int global_time_ir_post(const struct func *func, struct node *n,
- struct prog *prog)
- {
- ir_init_sym(prog->ir, n->sym);
- ir_emit_insn(prog->ir, CALL(BPF_FUNC_ktime_get_ns), 0, 0);
- ir_emit_reg_to_sym(prog->ir, n->sym, BPF_REG_0);
- return 0;
- }
- struct type t_time = {
- .ttype = T_TYPEDEF, /* TODO: should be a T_FUNC with a static
- * signature */
- .tdef = { .name = ":time", .type = &t_s64 },
- };
- struct type t_time_func = {
- .ttype = T_FUNC,
- .func = { .type = &t_time },
- };
- /* generic types shared by several built-ins */
- struct type t_block_func = {
- .ttype = T_FUNC,
- .func = { .type = &t_void, .vargs = 1 },
- };
- struct type t_string_array = {
- .ttype = T_ARRAY,
- .array = { .type = &t_char, .len = 64 }, /* TODO: tunable */
- };
- struct type t_string = {
- .ttype = T_TYPEDEF,
- .tdef = { .name = ":string", .type = &t_string_array },
- };
- struct tfield f_dot[] = {
- { .type = &t_void },
- { .type = &t_string },
- { .type = NULL }
- };
- struct type t_dot_func = {
- .ttype = T_FUNC,
- .func = { .type = &t_void, .args = f_dot },
- };
- struct tfield f_2args[] = {
- { .type = &t_void },
- { .type = &t_void },
- { .type = NULL }
- };
- struct type t_2args_func = {
- .ttype = T_FUNC,
- .func = { .type = &t_void, .args = f_2args },
- };
- struct tfield f_1arg[] = {
- { .type = &t_void },
- { .type = NULL }
- };
- struct type t_1arg_func = {
- .ttype = T_FUNC,
- .func = { .type = &t_void, .args = f_1arg },
- };
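- /* built-ins provided by the global provider. expressions are matched
- * against this table by name in global_sym_alloc_expr below. */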
- static const struct func global_funcs[] = {
- {
- .name = ":block",
- .type = &t_block_func,
- .static_ret = 1,
- },
- {
- .name = ".",
- .type = &t_dot_func,
- .type_infer = global_dot_type_infer,
- .ir_pre = global_dot_ir_pre,
- .ir_post = global_dot_ir_post,
- },
- {
- .name = ":deref",
- .type = &t_1arg_func,
- .type_infer = global_deref_type_infer,
- .ir_post = global_deref_ir_post,
- },
- {
- .name = "+",
- .type = &t_2args_func,
- .type_infer = global_binop_type_infer,
- },
- {
- .name = "-",
- .type = &t_2args_func,
- .type_infer = global_binop_type_infer,
- },
-
- {
- .name = "=",
- .type = &t_2args_func,
- .type_infer = global_assign_type_infer,
- .static_validate = global_assign_static_validate,
- .ir_pre = global_assign_ir_pre,
- .ir_post = global_assign_ir_post,
- },
- {
- .name = "{}",
- /* .type = t_map_func, */
- .type_infer = map_type_infer,
- .static_validate = map_static_validate,
- .ir_pre = map_ir_pre,
- .ir_post = map_ir_post,
- },
- {
- .name = "count",
- .type = &t_1arg_func,
- .static_validate = count_static_validate,
- .type_infer = count_type_infer,
- .ir_post = count_ir_post,
- },
- {
- .name = "pid",
- .type = &t_pid_func,
- .static_ret = 1,
- .ir_post = global_pid_ir_post,
- },
- {
- .name = "time",
- .type = &t_time_func,
- .static_ret = 1,
- .ir_post = global_time_ir_post,
- },
- {
- .name = "quantize",
- .type = &t_1arg_func,
- .type_infer = global_quantize_type_infer,
- },
-
- { .name = NULL }
- };
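- /* pick the smallest standard integer type that can hold the
- * literal. */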
- static struct type *global_num_type(struct node *n)
- {
- if (n->num.unsignd) {
- if (n->num.u64 <= INT_MAX)
- return &t_int;
- else if (n->num.u64 <= UINT_MAX)
- return &t_uint;
- else if (n->num.u64 <= LONG_MAX)
- return &t_long;
- else if (n->num.u64 <= ULONG_MAX)
- return &t_ulong;
- else if (n->num.u64 <= LLONG_MAX)
- return &t_llong;
- else if (n->num.u64 <= ULLONG_MAX)
- return &t_ullong;
- } else {
- if (n->num.s64 >= INT_MIN && n->num.s64 <= INT_MAX)
- return &t_int;
- else if (n->num.s64 >= LONG_MIN && n->num.s64 <= LONG_MAX)
- return &t_long;
- else if (n->num.s64 >= LLONG_MIN && n->num.s64 <= LLONG_MAX)
- return &t_llong;
- }
- assert(0);
- return NULL;
- }
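- /* numbers that fit in a sign-extended 32-bit immediate are used
- * directly as immediates; larger constants are built up in a register
- * with shift/or sequences, since BPF instruction immediates are only
- * 32 bits wide. */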
- static int global_num_ir_post(const struct func *func, struct node *n,
- struct prog *prog)
- {
- struct irstate *irs = &n->sym->irs;
- if ((n->num.unsignd && (n->num.u64 <= INT32_MAX)) ||
- (n->num.s64 >= INT32_MIN && n->num.s64 <= INT32_MAX)) {
- irs->loc = LOC_IMM;
- irs->imm = n->num.s64;
- irs->size = 4;
- return 0;
- }
- /* we need to build the constant in a register, so ignore any
- * advise about stack allocation. */
- irs->hint.stack = 0;
- ir_init_sym(prog->ir, n->sym);
- if (n->num.u64 > 0x3fffffffffffffff) {
- ir_emit_insn(prog->ir, MOV64_IMM(n->num.u64 >> 33), irs->reg, 0);
- ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 31), irs->reg, 0);
- if ((n->num.u64 >> 2) & 0x7fffffff)
- ir_emit_insn(prog->ir,
- ALU64_IMM(BPF_OR, (n->num.u64 >> 2) & 0x7fffffff),
- irs->reg, 0);
- ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 2), irs->reg, 0);
- if (n->num.u64 & 0x3)
- ir_emit_insn(prog->ir, ALU64_IMM(BPF_OR, n->num.u64 & 0x3),
- irs->reg, 0);
- } else if (n->num.u64 > 0x7fffffff) {
- ir_emit_insn(prog->ir, MOV64_IMM(n->num.u64 >> 31), irs->reg, 0);
- ir_emit_insn(prog->ir, ALU64_IMM(BPF_LSH, 31), irs->reg, 0);
- if (n->num.u64 & 0x7fffffff)
- ir_emit_insn(prog->ir,
- ALU64_IMM(BPF_OR, n->num.u64 & 0x7fffffff),
- irs->reg, 0);
- }
- return 0;
- }
- static const struct func global_num_func = {
- .name = ":num",
- .ir_post = global_num_ir_post,
- };
- static const struct func global_string_func = {
- .name = ":string",
- .type = &t_string,
- .static_ret = 1,
- };
- static const struct func global_ident_func = {
- .name = ":ident",
- };
- static const struct func *global_sym_alloc_expr(struct node *n)
- {
- const struct func *func;
- for (func = global_funcs; func->name; func++) {
- if (strcmp(func->name, n->expr.func))
- continue;
- return func;
- }
- return NULL;
- }
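- /* entry point for the global provider: find a matching built-in for
- * the node, validate it and allocate its symbol. identifiers go in
- * the program's global symbol table, everything else is local. */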
- int global_sym_alloc(struct prog *prog, struct node *n)
- {
- const struct func *func = NULL;
- struct symtab *st = prog->locals;
- int err;
- switch (n->ntype) {
- case N_EXPR:
- func = global_sym_alloc_expr(n);
- break;
- case N_IDENT:
- st = prog->globals;
- func = &global_ident_func;
- break;
- case N_NUM:
- func = &global_num_func;
- break;
- case N_STRING:
- func = &global_string_func;
- break;
- }
- if (!func)
- return -ENOENT;
- err = func_static_validate(func, n);
- if (err)
- return err;
- n->sym = sym_alloc(st, n, func);
- if (n->ntype == N_NUM)
- n->sym->type = global_num_type(n);
- else if (func->static_ret)
- n->sym->type = func_return_type(func);
- return 0;
- }
- int global_probe(struct prog *prog)
- {
- return 0;
- }
- struct provider global = {
- .name = ":",
- .sym_alloc = global_sym_alloc,
- .probe = global_probe,
- };
- __attribute__((constructor))
- static void global_init(void)
- {
- provider_register(&global);
- }