X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_bpf%2Fbpf_validate.c;h=9214f150385f065649d0cab7f75565f6d87a89dc;hb=383fb5a9c7f86931720a389335268b32fb9373a5;hp=83983efc4e5c4df90143ad41b2c45fd4f0a3d85d;hpb=8021917293d03d83d55e7ee4c40dada6b39cae44;p=dpdk.git

diff --git a/lib/librte_bpf/bpf_validate.c b/lib/librte_bpf/bpf_validate.c
index 83983efc4e..9214f15038 100644
--- a/lib/librte_bpf/bpf_validate.c
+++ b/lib/librte_bpf/bpf_validate.c
@@ -15,6 +15,8 @@
 
 #include "bpf_impl.h"
 
+#define BPF_ARG_PTR_STACK	RTE_BPF_ARG_RESERVED
+
 struct bpf_reg_val {
 	struct rte_bpf_arg v;
 	uint64_t mask;
@@ -68,6 +70,7 @@ struct bpf_verifier {
 	uint64_t stack_sz;
 	uint32_t nb_nodes;
 	uint32_t nb_jcc_nodes;
+	uint32_t nb_ldmb_nodes;
 	uint32_t node_colour[MAX_NODE_COLOUR];
 	uint32_t edge_type[MAX_EDGE_TYPE];
 	struct bpf_eval_state *evst;
@@ -100,6 +103,9 @@ struct bpf_ins_check {
 #define WRT_REGS	RTE_LEN2MASK(EBPF_REG_10, uint16_t)
 #define ZERO_REG	RTE_LEN2MASK(EBPF_REG_1, uint16_t)
 
+/* For LD_IND R6 is an implicit CTX register. */
+#define IND_SRC_REGS	(WRT_REGS ^ 1 << EBPF_REG_6)
+
 /*
  * check and evaluate functions for particular instruction types.
  */
@@ -224,7 +230,7 @@ eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
 	struct bpf_reg_val rv;
 
 	rv.u.min = (rd->u.min + rs->u.min) & msk;
-	rv.u.max = (rd->u.min + rs->u.max) & msk;
+	rv.u.max = (rd->u.max + rs->u.max) & msk;
 	rv.s.min = (rd->s.min + rs->s.min) & msk;
 	rv.s.max = (rd->s.max + rs->s.max) & msk;
 
@@ -252,10 +258,10 @@ eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
 {
 	struct bpf_reg_val rv;
 
-	rv.u.min = (rd->u.min - rs->u.min) & msk;
-	rv.u.max = (rd->u.min - rs->u.max) & msk;
-	rv.s.min = (rd->s.min - rs->s.min) & msk;
-	rv.s.max = (rd->s.max - rs->s.max) & msk;
+	rv.u.min = (rd->u.min - rs->u.max) & msk;
+	rv.u.max = (rd->u.max - rs->u.min) & msk;
+	rv.s.min = (rd->s.min - rs->s.max) & msk;
+	rv.s.max = (rd->s.max - rs->s.min) & msk;
 
 	/*
 	 * if at least one of the operands is not constant,
@@ -578,6 +584,42 @@ eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
 	rd->s.min = RTE_MIN(sx, sy);
 }
 
+static const char *
+eval_ld_mbuf(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+	uint32_t i, mode;
+	struct bpf_reg_val *rv, ri, rs;
+
+	mode = BPF_MODE(ins->code);
+
+	/* R6 is an implicit input that must contain pointer to mbuf */
+	if (bvf->evst->rv[EBPF_REG_6].v.type != RTE_BPF_ARG_PTR_MBUF)
+		return "invalid type for implicit ctx register";
+
+	if (mode == BPF_IND) {
+		rs = bvf->evst->rv[ins->src_reg];
+		if (rs.v.type != RTE_BPF_ARG_RAW)
+			return "unexpected type for src register";
+
+		eval_fill_imm(&ri, UINT64_MAX, ins->imm);
+		eval_add(&rs, &ri, UINT64_MAX);
+
+		if (rs.s.max < 0 || rs.u.min > UINT32_MAX)
+			return "mbuf boundary violation";
+	}
+
+	/* R1-R5 scratch registers */
+	for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
+		bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
+
+	/* R0 is an implicit output, contains data fetched from the packet */
+	rv = bvf->evst->rv + EBPF_REG_0;
+	rv->v.size = bpf_size(BPF_SIZE(ins->code));
+	eval_fill_max_bound(rv, RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
+
+	return NULL;
+}
+
 /*
  * check that destination and source operand are in defined state.
  */
@@ -710,7 +752,7 @@ eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,
 	if (rm->u.max % align != 0)
 		return "unaligned memory access";
 
-	if (rm->v.type == RTE_BPF_ARG_PTR_STACK) {
+	if (rm->v.type == BPF_ARG_PTR_STACK) {
 
 		if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
 				rm->u.max != (uint64_t)rm->s.max)
@@ -764,7 +806,7 @@ eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
 	if (err != NULL)
 		return err;
 
-	if (rs.v.type == RTE_BPF_ARG_PTR_STACK) {
+	if (rs.v.type == BPF_ARG_PTR_STACK) {
 
 		sv = st->sv + rs.u.max / sizeof(uint64_t);
 		if (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)
@@ -859,7 +901,7 @@ eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
 	if (err != NULL)
 		return err;
 
-	if (rd.v.type == RTE_BPF_ARG_PTR_STACK) {
+	if (rd.v.type == BPF_ARG_PTR_STACK) {
 
 		sv = st->sv + rd.u.max / sizeof(uint64_t);
 		if (BPF_CLASS(ins->code) == BPF_STX &&
@@ -908,7 +950,7 @@ eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
 	 * pointer to the variable on the stack is passed
 	 * as an argument, mark stack space it occupies as initialized.
 	 */
-	if (err == NULL && rv->v.type == RTE_BPF_ARG_PTR_STACK) {
+	if (err == NULL && rv->v.type == BPF_ARG_PTR_STACK) {
 
 		i = rv->u.max / sizeof(uint64_t);
 		n = i + arg->size / sizeof(uint64_t);
@@ -925,7 +967,6 @@ eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
 static const char *
 eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
 {
-	uint64_t msk;
 	uint32_t i, idx;
 	struct bpf_reg_val *rv;
 	const struct rte_bpf_xsym *xsym;
@@ -958,10 +999,11 @@ eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
 
 	rv = bvf->evst->rv + EBPF_REG_0;
 	rv->v = xsym->func.ret;
-	msk = (rv->v.type == RTE_BPF_ARG_RAW) ?
-		RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t) : UINTPTR_MAX;
-	eval_max_bound(rv, msk);
-	rv->mask = msk;
+	if (rv->v.type == RTE_BPF_ARG_RAW)
+		eval_fill_max_bound(rv,
+			RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
+	else if (RTE_BPF_ARG_PTR_TYPE(rv->v.type) != 0)
+		eval_fill_imm64(rv, UINTPTR_MAX, 0);
 
 	return err;
 }
@@ -1084,7 +1126,7 @@ eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
 /*
  * validate parameters for each instruction type.
  */
-static const struct bpf_ins_check ins_chk[UINT8_MAX] = {
+static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
 	/* ALU IMM 32-bit instructions */
 	[(BPF_ALU | BPF_ADD | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
@@ -1423,6 +1465,44 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX] = {
 		.imm = { .min = 0, .max = UINT32_MAX},
 		.eval = eval_ld_imm64,
 	},
+	/* load absolute instructions */
+	[(BPF_LD | BPF_ABS | BPF_B)] = {
+		.mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
+		.off = { .min = 0, .max = 0},
+		.imm = { .min = 0, .max = INT32_MAX},
+		.eval = eval_ld_mbuf,
+	},
+	[(BPF_LD | BPF_ABS | BPF_H)] = {
+		.mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
+		.off = { .min = 0, .max = 0},
+		.imm = { .min = 0, .max = INT32_MAX},
+		.eval = eval_ld_mbuf,
+	},
+	[(BPF_LD | BPF_ABS | BPF_W)] = {
+		.mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
+		.off = { .min = 0, .max = 0},
+		.imm = { .min = 0, .max = INT32_MAX},
+		.eval = eval_ld_mbuf,
+	},
+	/* load indirect instructions */
+	[(BPF_LD | BPF_IND | BPF_B)] = {
+		.mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
+		.off = { .min = 0, .max = 0},
+		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_ld_mbuf,
+	},
+	[(BPF_LD | BPF_IND | BPF_H)] = {
+		.mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
+		.off = { .min = 0, .max = 0},
+		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_ld_mbuf,
+	},
+	[(BPF_LD | BPF_IND | BPF_W)] = {
+		.mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
+		.off = { .min = 0, .max = 0},
+		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_ld_mbuf,
+	},
 	/* store REG instructions */
 	[(BPF_STX | BPF_MEM | BPF_B)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
@@ -1941,6 +2021,14 @@ validate(struct bpf_verifier *bvf)
 			rc |= add_edge(bvf, node, i + 2);
 			i++;
 			break;
+		case (BPF_LD | BPF_ABS | BPF_B):
+		case (BPF_LD | BPF_ABS | BPF_H):
+		case (BPF_LD | BPF_ABS | BPF_W):
+		case (BPF_LD | BPF_IND | BPF_B):
+		case (BPF_LD | BPF_IND | BPF_H):
+		case (BPF_LD | BPF_IND | BPF_W):
+			bvf->nb_ldmb_nodes++;
+			/* fallthrough */
 		default:
 			rc |= add_edge(bvf, node, i + 1);
 			break;
@@ -2131,7 +2219,7 @@ evaluate(struct bpf_verifier *bvf)
 	/* initial state of frame pointer */
 	static const struct bpf_reg_val rvfp = {
 		.v = {
-			.type = RTE_BPF_ARG_PTR_STACK,
+			.type = BPF_ARG_PTR_STACK,
 			.size = MAX_BPF_STACK_SIZE,
 		},
 		.mask = UINT64_MAX,
@@ -2241,8 +2329,14 @@ bpf_validate(struct rte_bpf *bpf)
 	free(bvf.in);
 
 	/* copy collected info */
-	if (rc == 0)
+	if (rc == 0) {
 		bpf->stack_sz = bvf.stack_sz;
 
+		/* for LD_ABS/LD_IND, we'll need extra space on the stack */
+		if (bvf.nb_ldmb_nodes != 0)
+			bpf->stack_sz = RTE_ALIGN_CEIL(bpf->stack_sz +
+				sizeof(uint64_t), sizeof(uint64_t));
+	}
+
 	return rc;
 }
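
The new eval_ld_mbuf() enforces the classic BPF packet-load contract: R6 must hold the program's mbuf context (RTE_BPF_ARG_PTR_MBUF), R0 is the implicit destination for the fetched bytes, and R1-R5 are treated as clobbered scratch registers. For LD_IND, the source register plus the immediate must resolve to a sane non-negative 32-bit packet offset, which is what the eval_fill_imm()/eval_add() sequence followed by the rs.s.max/rs.u.min test enforces. As a rough illustration (not part of the patch; the encoding follows DPDK's struct ebpf_insn, but this particular program is hypothetical), a two-instruction program exercising the new LD_ABS support could look like:

/* R0 = big-endian 16-bit load from mbuf data at absolute offset 12 */
static const struct ebpf_insn prog[] = {
	{
		.code = (BPF_LD | BPF_ABS | BPF_H),
		.imm = 12,	/* offset of the Ethernet type field */
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),	/* return R0 */
	},
};

Such a program should only pass bpf_validate() when the load parameters declare the program argument as RTE_BPF_ARG_PTR_MBUF, since that is what makes R6 an mbuf pointer at entry.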
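The eval_add()/eval_sub() changes fix the interval arithmetic used for register value tracking: for ranges A = [a.min, a.max] and B = [b.min, b.max], A + B = [a.min + b.min, a.max + b.max] while A - B = [a.min - b.max, a.max - b.min]; the old code paired the wrong endpoints (e.g. u.max was derived from rd->u.min). Below is a minimal standalone sketch of the corrected pairing; the ival/ival_add/ival_sub names are invented for this example, and unlike the real functions it omits the widening to full range that eval_add()/eval_sub() apply when a non-constant operand may wrap.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct ival {
	uint64_t min;
	uint64_t max;
};

/* [a.min, a.max] + [b.min, b.max] = [a.min + b.min, a.max + b.max] */
static struct ival
ival_add(struct ival a, struct ival b, uint64_t msk)
{
	struct ival r;

	r.min = (a.min + b.min) & msk;
	r.max = (a.max + b.max) & msk;	/* the fixed line: max + max, not min + max */
	return r;
}

/* [a.min, a.max] - [b.min, b.max] = [a.min - b.max, a.max - b.min] */
static struct ival
ival_sub(struct ival a, struct ival b, uint64_t msk)
{
	struct ival r;

	/* smallest result: smallest minuend minus largest subtrahend */
	r.min = (a.min - b.max) & msk;
	/* largest result: largest minuend minus smallest subtrahend */
	r.max = (a.max - b.min) & msk;
	return r;
}

int
main(void)
{
	struct ival a = { .min = 10, .max = 20 };
	struct ival b = { .min = 1, .max = 5 };
	struct ival s = ival_add(a, b, UINT64_MAX);
	struct ival d = ival_sub(a, b, UINT64_MAX);

	printf("add=[%" PRIu64 ",%" PRIu64 "] sub=[%" PRIu64 ",%" PRIu64 "]\n",
		s.min, s.max, d.min, d.max);
	return 0;
}

Compiled as C99, this prints add=[11,25] sub=[5,19], the tight bounds for the two example intervals.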