1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
12 #include <rte_common.h>
14 #include <rte_byteorder.h>
31 struct bpf_eval_state {
32 struct bpf_reg_val rv[EBPF_REG_NUM];
33 struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
36 /* possible instruction node colour */
44 /* possible edge types */
59 uint8_t edge_type[MAX_EDGES];
60 uint32_t edge_dest[MAX_EDGES];
62 struct bpf_eval_state *evst;
66 const struct rte_bpf_prm *prm;
70 uint32_t nb_jcc_nodes;
71 uint32_t node_colour[MAX_NODE_COLOUR];
72 uint32_t edge_type[MAX_EDGE_TYPE];
73 struct bpf_eval_state *evst;
74 struct inst_node *evin;
78 struct bpf_eval_state *ent;
82 struct bpf_ins_check {
95 const char * (*check)(const struct ebpf_insn *);
96 const char * (*eval)(struct bpf_verifier *, const struct ebpf_insn *);
99 #define ALL_REGS RTE_LEN2MASK(EBPF_REG_NUM, uint16_t)
100 #define WRT_REGS RTE_LEN2MASK(EBPF_REG_10, uint16_t)
101 #define ZERO_REG RTE_LEN2MASK(EBPF_REG_1, uint16_t)
104 * check and evaluate functions for particular instruction types.
108 check_alu_bele(const struct ebpf_insn *ins)
110 if (ins->imm != 16 && ins->imm != 32 && ins->imm != 64)
111 return "invalid imm field";
116 eval_exit(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
119 if (bvf->evst->rv[EBPF_REG_0].v.type == RTE_BPF_ARG_UNDEF)
120 return "undefined return value";
124 /* set up the max possible bounds for the given mask */
126 eval_umax_bound(struct bpf_reg_val *rv, uint64_t mask)
133 eval_smax_bound(struct bpf_reg_val *rv, uint64_t mask)
135 rv->s.max = mask >> 1;
136 rv->s.min = rv->s.max ^ UINT64_MAX;
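/*
 * e.g. for mask == UINT32_MAX this yields s.max == INT32_MAX and
 * s.min == INT32_MIN (sign-extended to 64 bits), i.e. the widest
 * signed range that fits within the masked width.
 */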
140 eval_max_bound(struct bpf_reg_val *rv, uint64_t mask)
142 eval_umax_bound(rv, mask);
143 eval_smax_bound(rv, mask);
147 eval_fill_max_bound(struct bpf_reg_val *rv, uint64_t mask)
149 eval_max_bound(rv, mask);
150 rv->v.type = RTE_BPF_ARG_RAW;
155 eval_fill_imm64(struct bpf_reg_val *rv, uint64_t mask, uint64_t val)
165 eval_fill_imm(struct bpf_reg_val *rv, uint64_t mask, int32_t imm)
169 v = (uint64_t)imm & mask;
171 rv->v.type = RTE_BPF_ARG_RAW;
172 eval_fill_imm64(rv, mask, v);
176 eval_ld_imm64(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
180 struct bpf_reg_val *rd;
182 val = (uint32_t)ins[0].imm | (uint64_t)(uint32_t)ins[1].imm << 32;
184 rd = bvf->evst->rv + ins->dst_reg;
185 rd->v.type = RTE_BPF_ARG_RAW;
186 eval_fill_imm64(rd, UINT64_MAX, val);
188 for (i = 0; i != bvf->prm->nb_xsym; i++) {
190 /* load of external variable */
191 if (bvf->prm->xsym[i].type == RTE_BPF_XTYPE_VAR &&
192 (uintptr_t)bvf->prm->xsym[i].var.val == val) {
193 rd->v = bvf->prm->xsym[i].var.desc;
194 eval_fill_imm64(rd, UINT64_MAX, 0);
203 eval_apply_mask(struct bpf_reg_val *rv, uint64_t mask)
205 struct bpf_reg_val rt;
207 rt.u.min = rv->u.min & mask;
208 rt.u.max = rv->u.max & mask;
209 if (rt.u.min != rv->u.min || rt.u.max != rv->u.max) {
210 rv->u.max = RTE_MAX(rt.u.max, mask);
214 eval_smax_bound(&rt, mask);
215 rv->s.max = RTE_MIN(rt.s.max, rv->s.max);
216 rv->s.min = RTE_MAX(rt.s.min, rv->s.min);
222 eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
224 struct bpf_reg_val rv;
226 rv.u.min = (rd->u.min + rs->u.min) & msk;
227 rv.u.max = (rd->u.max + rs->u.max) & msk;
228 rv.s.min = (rd->s.min + rs->s.min) & msk;
229 rv.s.max = (rd->s.max + rs->s.max) & msk;
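/*
 * The lines above are plain interval arithmetic on [min, max]; they are
 * only valid if no wrap-around happened under the mask. Wrap shows up
 * as the masked result being smaller than an operand, e.g. with an
 * 8-bit mask (0xff + 0x02) & 0xff == 0x01 < 0xff.
 */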
232 * if at least one of the operands is not constant,
233 * then check for overflow
235 if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
236 (rv.u.min < rd->u.min || rv.u.max < rd->u.max))
237 eval_umax_bound(&rv, msk);
239 if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
240 (((rs->s.min < 0 && rv.s.min > rd->s.min) ||
241 rv.s.min < rd->s.min) ||
242 ((rs->s.max < 0 && rv.s.max > rd->s.max) ||
243 rv.s.max < rd->s.max)))
244 eval_smax_bound(&rv, msk);
251 eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
253 struct bpf_reg_val rv;
255 rv.u.min = (rd->u.min - rs->u.max) & msk;
256 rv.u.max = (rd->u.max - rs->u.min) & msk;
257 rv.s.min = (rd->s.min - rs->s.max) & msk;
258 rv.s.max = (rd->s.max - rs->s.min) & msk;
261 * if at least one of the operands is not constant,
262 * then check for overflow
264 if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
265 (rv.u.min > rd->u.min || rv.u.max > rd->u.max))
266 eval_umax_bound(&rv, msk);
268 if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
269 (((rs->s.min < 0 && rv.s.min < rd->s.min) ||
270 rv.s.min > rd->s.min) ||
271 ((rs->s.max < 0 && rv.s.max < rd->s.max) ||
272 rv.s.max > rd->s.max)))
273 eval_smax_bound(&rv, msk);
280 eval_lsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
283 /* check if shift value is less than max result bits */
284 if (rs->u.max >= opsz) {
285 eval_max_bound(rd, msk);
289 /* check for overflow */
290 if (rd->u.max > RTE_LEN2MASK(opsz - rs->u.max, uint64_t))
291 eval_umax_bound(rd, msk);
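/*
 * e.g. with opsz == 32 and rs->u.max == 8: any rd->u.max above
 * RTE_LEN2MASK(24) would lose its top bits when shifted left by 8,
 * so the unsigned bounds are widened to the full masked range first.
 */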
293 rd->u.max <<= rs->u.max;
294 rd->u.min <<= rs->u.min;
297 /* check that dreg values are non-negative and would remain so after the shift */
298 if ((uint64_t)rd->s.min >> (opsz - 1) != 0 || rd->s.max >=
299 RTE_LEN2MASK(opsz - rs->u.max - 1, int64_t))
300 eval_smax_bound(rd, msk);
302 rd->s.max <<= rs->u.max;
303 rd->s.min <<= rs->u.min;
308 eval_rsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
311 /* check if shift value is less than max result bits */
312 if (rs->u.max >= opsz) {
313 eval_max_bound(rd, msk);
317 rd->u.max >>= rs->u.min;
318 rd->u.min >>= rs->u.max;
320 /* check that dreg values are always positive */
321 if ((uint64_t)rd->s.min >> (opsz - 1) != 0)
322 eval_smax_bound(rd, msk);
324 rd->s.max >>= rs->u.min;
325 rd->s.min >>= rs->u.max;
330 eval_arsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
335 /* check if shift value is less than max result bits */
336 if (rs->u.max >= opsz) {
337 eval_max_bound(rd, msk);
341 rd->u.max = (int64_t)rd->u.max >> rs->u.min;
342 rd->u.min = (int64_t)rd->u.min >> rs->u.max;
344 /* if we have 32-bit values - extend them to 64-bit */
345 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
353 rd->s.min = (rd->s.min >> (rs->u.min + shv)) & msk;
355 rd->s.min = (rd->s.min >> (rs->u.max + shv)) & msk;
358 rd->s.max = (rd->s.max >> (rs->u.max + shv)) & msk;
360 rd->s.max = (rd->s.max >> (rs->u.min + shv)) & msk;
364 eval_umax_bits(uint64_t v, size_t opsz)
369 v = __builtin_clzll(v);
370 return RTE_LEN2MASK(opsz - v, uint64_t);
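/*
 * e.g. v == 0x13 with opsz == 64: __builtin_clzll() reports 59 leading
 * zeros, so the value fits into 5 bits and the returned upper bound is
 * RTE_LEN2MASK(5, uint64_t) == 0x1f.
 */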
373 /* estimate max possible value for (v1 & v2) */
375 eval_uand_max(uint64_t v1, uint64_t v2, size_t opsz)
377 v1 = eval_umax_bits(v1, opsz);
378 v2 = eval_umax_bits(v2, opsz);
382 /* estimate max possible value for (v1 | v2) */
384 eval_uor_max(uint64_t v1, uint64_t v2, size_t opsz)
386 v1 = eval_umax_bits(v1, opsz);
387 v2 = eval_umax_bits(v2, opsz);
392 eval_and(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
395 /* both operands are constants */
396 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
397 rd->u.min &= rs->u.min;
398 rd->u.max &= rs->u.max;
400 rd->u.max = eval_uand_max(rd->u.max, rs->u.max, opsz);
401 rd->u.min &= rs->u.min;
404 /* both operands are constants */
405 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
406 rd->s.min &= rs->s.min;
407 rd->s.max &= rs->s.max;
408 /* at least one of the operands is non-negative */
409 } else if (rd->s.min >= 0 || rs->s.min >= 0) {
410 rd->s.max = eval_uand_max(rd->s.max & (msk >> 1),
411 rs->s.max & (msk >> 1), opsz);
412 rd->s.min &= rs->s.min;
414 eval_smax_bound(rd, msk);
418 eval_or(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
421 /* both operands are constants */
422 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
423 rd->u.min |= rs->u.min;
424 rd->u.max |= rs->u.max;
426 rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
427 rd->u.min |= rs->u.min;
430 /* both operands are constants */
431 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
432 rd->s.min |= rs->s.min;
433 rd->s.max |= rs->s.max;
435 /* both operands are non-negative */
436 } else if (rd->s.min >= 0 && rs->s.min >= 0) {
437 rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
438 rd->s.min |= rs->s.min;
440 eval_smax_bound(rd, msk);
444 eval_xor(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
447 /* both operands are constants */
448 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
449 rd->u.min ^= rs->u.min;
450 rd->u.max ^= rs->u.max;
452 rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
456 /* both operands are constants */
457 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
458 rd->s.min ^= rs->s.min;
459 rd->s.max ^= rs->s.max;
461 /* both operands are non-negative */
462 } else if (rd->s.min >= 0 && rs->s.min >= 0) {
463 rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
466 eval_smax_bound(rd, msk);
470 eval_mul(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
473 /* both operands are constants */
474 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
475 rd->u.min = (rd->u.min * rs->u.min) & msk;
476 rd->u.max = (rd->u.max * rs->u.max) & msk;
477 /* check for overflow */
478 } else if (rd->u.max <= msk >> opsz / 2 && rs->u.max <= msk >> opsz / 2) {
479 rd->u.max *= rs->u.max;
480 rd->u.min *= rs->u.min;
482 eval_umax_bound(rd, msk);
484 /* both operands are constants */
485 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
486 rd->s.min = (rd->s.min * rs->s.min) & msk;
487 rd->s.max = (rd->s.max * rs->s.max) & msk;
488 /* check that both operands are positive and no overflow */
489 } else if (rd->s.min >= 0 && rs->s.min >= 0) {
490 rd->s.max *= rs->s.max;
491 rd->s.min *= rs->s.min;
493 eval_smax_bound(rd, msk);
497 eval_divmod(uint32_t op, struct bpf_reg_val *rd, struct bpf_reg_val *rs,
498 size_t opsz, uint64_t msk)
500 /* both operands are constants */
501 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
503 return "division by 0";
505 rd->u.min /= rs->u.min;
506 rd->u.max /= rs->u.max;
508 rd->u.min %= rs->u.min;
509 rd->u.max %= rs->u.max;
513 rd->u.max = RTE_MIN(rd->u.max, rs->u.max - 1);
515 rd->u.max = rd->u.max;
519 /* if we have 32-bit values - extend them to 64-bit */
520 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
521 rd->s.min = (int32_t)rd->s.min;
522 rd->s.max = (int32_t)rd->s.max;
523 rs->s.min = (int32_t)rs->s.min;
524 rs->s.max = (int32_t)rs->s.max;
527 /* both operands are constants */
528 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
530 return "division by 0";
532 rd->s.min /= rs->s.min;
533 rd->s.max /= rs->s.max;
535 rd->s.min %= rs->s.min;
536 rd->s.max %= rs->s.max;
538 } else if (op == BPF_MOD) {
539 rd->s.max = RTE_MAX(rd->s.max, 0);
540 rd->s.min = RTE_MIN(rd->s.min, 0);
542 eval_smax_bound(rd, msk);
551 eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
556 /* if we have 32-bit values - extend them to 64-bit */
557 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
558 rd->u.min = (int32_t)rd->u.min;
559 rd->u.max = (int32_t)rd->u.max;
562 ux = -(int64_t)rd->u.min & msk;
563 uy = -(int64_t)rd->u.max & msk;
565 rd->u.max = RTE_MAX(ux, uy);
566 rd->u.min = RTE_MIN(ux, uy);
568 /* if we have 32-bit values - extend them to 64-bit */
569 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
570 rd->s.min = (int32_t)rd->s.min;
571 rd->s.max = (int32_t)rd->s.max;
574 sx = -rd->s.min & msk;
575 sy = -rd->s.max & msk;
577 rd->s.max = RTE_MAX(sx, sy);
578 rd->s.min = RTE_MIN(sx, sy);
582 * check that destination and source operands are in a defined state.
585 eval_defined(const struct bpf_reg_val *dst, const struct bpf_reg_val *src)
587 if (dst != NULL && dst->v.type == RTE_BPF_ARG_UNDEF)
588 return "dest reg value is undefined";
589 if (src != NULL && src->v.type == RTE_BPF_ARG_UNDEF)
590 return "src reg value is undefined";
595 eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
601 struct bpf_eval_state *st;
602 struct bpf_reg_val *rd, rs;
604 opsz = (BPF_CLASS(ins->code) == BPF_ALU) ?
605 sizeof(uint32_t) : sizeof(uint64_t);
606 opsz = opsz * CHAR_BIT;
607 msk = RTE_LEN2MASK(opsz, uint64_t);
610 rd = st->rv + ins->dst_reg;
612 if (BPF_SRC(ins->code) == BPF_X) {
613 rs = st->rv[ins->src_reg];
614 eval_apply_mask(&rs, msk);
616 eval_fill_imm(&rs, msk, ins->imm);
618 eval_apply_mask(rd, msk);
620 op = BPF_OP(ins->code);
622 err = eval_defined((op != EBPF_MOV) ? rd : NULL,
623 (op != BPF_NEG) ? &rs : NULL);
628 eval_add(rd, &rs, msk);
629 else if (op == BPF_SUB)
630 eval_sub(rd, &rs, msk);
631 else if (op == BPF_LSH)
632 eval_lsh(rd, &rs, opsz, msk);
633 else if (op == BPF_RSH)
634 eval_rsh(rd, &rs, opsz, msk);
635 else if (op == EBPF_ARSH)
636 eval_arsh(rd, &rs, opsz, msk);
637 else if (op == BPF_AND)
638 eval_and(rd, &rs, opsz, msk);
639 else if (op == BPF_OR)
640 eval_or(rd, &rs, opsz, msk);
641 else if (op == BPF_XOR)
642 eval_xor(rd, &rs, opsz, msk);
643 else if (op == BPF_MUL)
644 eval_mul(rd, &rs, opsz, msk);
645 else if (op == BPF_DIV || op == BPF_MOD)
646 err = eval_divmod(op, rd, &rs, opsz, msk);
647 else if (op == BPF_NEG)
648 eval_neg(rd, opsz, msk);
649 else if (op == EBPF_MOV)
652 eval_max_bound(rd, msk);
658 eval_bele(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
661 struct bpf_eval_state *st;
662 struct bpf_reg_val *rd;
665 msk = RTE_LEN2MASK(ins->imm, uint64_t);
668 rd = st->rv + ins->dst_reg;
670 err = eval_defined(rd, NULL);
674 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
675 if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_BE))
676 eval_max_bound(rd, msk);
678 eval_apply_mask(rd, msk);
680 if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_LE))
681 eval_max_bound(rd, msk);
683 eval_apply_mask(rd, msk);
690 eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,
691 uint32_t align, int16_t off)
693 struct bpf_reg_val rv;
695 /* calculate reg + offset */
696 eval_fill_imm(&rv, rm->mask, off);
697 eval_add(rm, &rv, rm->mask);
699 if (RTE_BPF_ARG_PTR_TYPE(rm->v.type) == 0)
700 return "destination is not a pointer";
702 if (rm->mask != UINT64_MAX)
703 return "pointer truncation";
705 if (rm->u.max + opsz > rm->v.size ||
706 (uint64_t)rm->s.max + opsz > rm->v.size ||
708 return "memory boundary violation";
710 if (rm->u.max % align != 0)
711 return "unaligned memory access";
713 if (rm->v.type == RTE_BPF_ARG_PTR_STACK) {
715 if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
716 rm->u.max != (uint64_t)rm->s.max)
717 return "stack access with variable offset";
719 bvf->stack_sz = RTE_MAX(bvf->stack_sz, rm->v.size - rm->u.max);
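/*
 * R10 starts at the top of the stack area (u.max == v.size ==
 * MAX_BPF_STACK_SIZE), so v.size - u.max is how many bytes below the
 * frame pointer this access reaches; the running maximum becomes the
 * program's stack usage.
 */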
721 /* pointer to mbuf */
722 } else if (rm->v.type == RTE_BPF_ARG_PTR_MBUF) {
724 if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
725 rm->u.max != (uint64_t)rm->s.max)
726 return "mbuf access with variable offset";
733 eval_max_load(struct bpf_reg_val *rv, uint64_t mask)
735 eval_umax_bound(rv, mask);
737 /* full 64-bit load */
738 if (mask == UINT64_MAX)
739 eval_smax_bound(rv, mask);
741 /* zero-extend load */
742 rv->s.min = rv->u.min;
743 rv->s.max = rv->u.max;
748 eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
753 struct bpf_eval_state *st;
754 struct bpf_reg_val *rd, rs;
755 const struct bpf_reg_val *sv;
758 rd = st->rv + ins->dst_reg;
759 rs = st->rv[ins->src_reg];
760 opsz = bpf_size(BPF_SIZE(ins->code));
761 msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
763 err = eval_ptr(bvf, &rs, opsz, 1, ins->off);
767 if (rs.v.type == RTE_BPF_ARG_PTR_STACK) {
769 sv = st->sv + rs.u.max / sizeof(uint64_t);
770 if (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)
771 return "undefined value on the stack";
775 /* pointer to mbuf */
776 } else if (rs.v.type == RTE_BPF_ARG_PTR_MBUF) {
778 if (rs.u.max == offsetof(struct rte_mbuf, next)) {
779 eval_fill_imm(rd, msk, 0);
781 } else if (rs.u.max == offsetof(struct rte_mbuf, buf_addr)) {
782 eval_fill_imm(rd, msk, 0);
783 rd->v.type = RTE_BPF_ARG_PTR;
784 rd->v.size = rs.v.buf_size;
785 } else if (rs.u.max == offsetof(struct rte_mbuf, data_off)) {
786 eval_fill_imm(rd, msk, RTE_PKTMBUF_HEADROOM);
787 rd->v.type = RTE_BPF_ARG_RAW;
789 eval_max_load(rd, msk);
790 rd->v.type = RTE_BPF_ARG_RAW;
793 /* pointer to raw data */
795 eval_max_load(rd, msk);
796 rd->v.type = RTE_BPF_ARG_RAW;
803 eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)
807 static const struct {
810 } mbuf_ro_fields[] = {
811 { .off = offsetof(struct rte_mbuf, buf_addr), },
812 { .off = offsetof(struct rte_mbuf, refcnt), },
813 { .off = offsetof(struct rte_mbuf, nb_segs), },
814 { .off = offsetof(struct rte_mbuf, buf_len), },
815 { .off = offsetof(struct rte_mbuf, pool), },
816 { .off = offsetof(struct rte_mbuf, next), },
817 { .off = offsetof(struct rte_mbuf, priv_size), },
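/*
 * the loop below scans for a read-only field that overlaps the written
 * range [rv->u.max, rv->u.max + opsz); it stops at the first overlap,
 * which the check after the loop reports as an error.
 */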
820 for (i = 0; i != RTE_DIM(mbuf_ro_fields) &&
821 (mbuf_ro_fields[i].off + mbuf_ro_fields[i].sz <=
822 rv->u.max || rv->u.max + opsz <= mbuf_ro_fields[i].off);
826 if (i != RTE_DIM(mbuf_ro_fields))
827 return "store to the read-only mbuf field";
834 eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
839 struct bpf_eval_state *st;
840 struct bpf_reg_val rd, rs, *sv;
842 opsz = bpf_size(BPF_SIZE(ins->code));
843 msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
846 rd = st->rv[ins->dst_reg];
848 if (BPF_CLASS(ins->code) == BPF_STX) {
849 rs = st->rv[ins->src_reg];
850 eval_apply_mask(&rs, msk);
852 eval_fill_imm(&rs, msk, ins->imm);
854 err = eval_defined(NULL, &rs);
858 err = eval_ptr(bvf, &rd, opsz, 1, ins->off);
862 if (rd.v.type == RTE_BPF_ARG_PTR_STACK) {
864 sv = st->sv + rd.u.max / sizeof(uint64_t);
865 if (BPF_CLASS(ins->code) == BPF_STX &&
866 BPF_MODE(ins->code) == EBPF_XADD)
867 eval_max_bound(sv, msk);
871 /* pointer to mbuf */
872 } else if (rd.v.type == RTE_BPF_ARG_PTR_MBUF) {
873 err = eval_mbuf_store(&rd, opsz);
882 eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
883 struct bpf_reg_val *rv)
886 struct bpf_eval_state *st;
891 if (rv->v.type == RTE_BPF_ARG_UNDEF)
892 return "Undefined argument type";
894 if (arg->type != rv->v.type &&
895 arg->type != RTE_BPF_ARG_RAW &&
896 (arg->type != RTE_BPF_ARG_PTR ||
897 RTE_BPF_ARG_PTR_TYPE(rv->v.type) == 0))
898 return "Invalid argument type";
902 /* argument is a pointer */
903 if (RTE_BPF_ARG_PTR_TYPE(arg->type) != 0) {
905 err = eval_ptr(bvf, rv, arg->size, 1, 0);
908 * a pointer to a variable on the stack is passed
909 * as an argument; mark the stack space it occupies as initialized.
911 if (err == NULL && rv->v.type == RTE_BPF_ARG_PTR_STACK) {
913 i = rv->u.max / sizeof(uint64_t);
914 n = i + arg->size / sizeof(uint64_t);
916 eval_fill_max_bound(st->sv + i, UINT64_MAX);
926 eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
929 struct bpf_reg_val *rv;
930 const struct rte_bpf_xsym *xsym;
935 if (idx >= bvf->prm->nb_xsym ||
936 bvf->prm->xsym[idx].type != RTE_BPF_XTYPE_FUNC)
937 return "invalid external function index";
939 /* for now don't support function calls on 32 bit platform */
940 if (sizeof(uint64_t) != sizeof(uintptr_t))
941 return "function calls are supported only for 64 bit apps";
943 xsym = bvf->prm->xsym + idx;
945 /* evaluate function arguments */
947 for (i = 0; i != xsym->func.nb_args && err == NULL; i++) {
948 err = eval_func_arg(bvf, xsym->func.args + i,
949 bvf->evst->rv + EBPF_REG_1 + i);
952 /* R1-R5 argument/scratch registers */
953 for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
954 bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
956 /* update return value */
958 rv = bvf->evst->rv + EBPF_REG_0;
959 rv->v = xsym->func.ret;
960 if (rv->v.type == RTE_BPF_ARG_RAW)
961 eval_fill_max_bound(rv,
962 RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
963 else if (RTE_BPF_ARG_PTR_TYPE(rv->v.type) != 0)
964 eval_fill_imm64(rv, UINTPTR_MAX, 0);
970 eval_jeq_jne(struct bpf_reg_val *trd, struct bpf_reg_val *trs)
972 /* sreg is constant */
973 if (trs->u.min == trs->u.max) {
975 /* dreg is constant */
976 } else if (trd->u.min == trd->u.max) {
979 trd->u.max = RTE_MIN(trd->u.max, trs->u.max);
980 trd->u.min = RTE_MAX(trd->u.min, trs->u.min);
984 /* sreg is constant */
985 if (trs->s.min == trs->s.max) {
987 /* dreg is constant */
988 } else if (trd->s.min == trd->s.max) {
991 trd->s.max = RTE_MIN(trd->s.max, trs->s.max);
992 trd->s.min = RTE_MAX(trd->s.min, trs->s.min);
998 eval_jgt_jle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
999 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1001 frd->u.max = RTE_MIN(frd->u.max, frs->u.min);
1002 trd->u.min = RTE_MAX(trd->u.min, trs->u.min + 1);
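/*
 * shared by JGT and JLE: the caller passes the (true, false) register
 * pairs in the order matching the branch semantics, e.g. for
 * 'if (dst > src)' the taken-branch minimum of dst is raised to
 * src->u.min + 1.
 */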
1006 eval_jlt_jge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1007 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1009 frd->u.min = RTE_MAX(frd->u.min, frs->u.min);
1010 trd->u.max = RTE_MIN(trd->u.max, trs->u.max - 1);
1014 eval_jsgt_jsle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1015 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1017 frd->s.max = RTE_MIN(frd->s.max, frs->s.min);
1018 trd->s.min = RTE_MAX(trd->s.min, trs->s.min + 1);
1022 eval_jslt_jsge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1023 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1025 frd->s.min = RTE_MAX(frd->s.min, frs->s.min);
1026 trd->s.max = RTE_MIN(trd->s.max, trs->s.max - 1);
1030 eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
1034 struct bpf_eval_state *fst, *tst;
1035 struct bpf_reg_val *frd, *frs, *trd, *trs;
1036 struct bpf_reg_val rvf, rvt;
1039 fst = bvf->evin->evst;
1041 frd = fst->rv + ins->dst_reg;
1042 trd = tst->rv + ins->dst_reg;
1044 if (BPF_SRC(ins->code) == BPF_X) {
1045 frs = fst->rv + ins->src_reg;
1046 trs = tst->rv + ins->src_reg;
1050 eval_fill_imm(frs, UINT64_MAX, ins->imm);
1051 eval_fill_imm(trs, UINT64_MAX, ins->imm);
1054 err = eval_defined(trd, trs);
1058 op = BPF_OP(ins->code);
1061 eval_jeq_jne(trd, trs);
1062 else if (op == EBPF_JNE)
1063 eval_jeq_jne(frd, frs);
1064 else if (op == BPF_JGT)
1065 eval_jgt_jle(trd, trs, frd, frs);
1066 else if (op == EBPF_JLE)
1067 eval_jgt_jle(frd, frs, trd, trs);
1068 else if (op == EBPF_JLT)
1069 eval_jlt_jge(trd, trs, frd, frs);
1070 else if (op == BPF_JGE)
1071 eval_jlt_jge(frd, frs, trd, trs);
1072 else if (op == EBPF_JSGT)
1073 eval_jsgt_jsle(trd, trs, frd, frs);
1074 else if (op == EBPF_JSLE)
1075 eval_jsgt_jsle(frd, frs, trd, trs);
1076 else if (op == EBPF_JSLT)
1077 eval_jslt_jsge(trd, trs, frd, frs);
1078 else if (op == EBPF_JSGE)
1079 eval_jslt_jsge(frd, frs, trd, trs);
1085 * validate parameters for each instruction type.
1087 static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
1088 /* ALU IMM 32-bit instructions */
1089 [(BPF_ALU | BPF_ADD | BPF_K)] = {
1090 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1091 .off = { .min = 0, .max = 0},
1092 .imm = { .min = 0, .max = UINT32_MAX,},
1095 [(BPF_ALU | BPF_SUB | BPF_K)] = {
1096 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1097 .off = { .min = 0, .max = 0},
1098 .imm = { .min = 0, .max = UINT32_MAX,},
1101 [(BPF_ALU | BPF_AND | BPF_K)] = {
1102 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1103 .off = { .min = 0, .max = 0},
1104 .imm = { .min = 0, .max = UINT32_MAX,},
1107 [(BPF_ALU | BPF_OR | BPF_K)] = {
1108 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1109 .off = { .min = 0, .max = 0},
1110 .imm = { .min = 0, .max = UINT32_MAX,},
1113 [(BPF_ALU | BPF_LSH | BPF_K)] = {
1114 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1115 .off = { .min = 0, .max = 0},
1116 .imm = { .min = 0, .max = UINT32_MAX,},
1119 [(BPF_ALU | BPF_RSH | BPF_K)] = {
1120 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1121 .off = { .min = 0, .max = 0},
1122 .imm = { .min = 0, .max = UINT32_MAX,},
1125 [(BPF_ALU | BPF_XOR | BPF_K)] = {
1126 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1127 .off = { .min = 0, .max = 0},
1128 .imm = { .min = 0, .max = UINT32_MAX,},
1131 [(BPF_ALU | BPF_MUL | BPF_K)] = {
1132 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1133 .off = { .min = 0, .max = 0},
1134 .imm = { .min = 0, .max = UINT32_MAX,},
1137 [(BPF_ALU | EBPF_MOV | BPF_K)] = {
1138 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1139 .off = { .min = 0, .max = 0},
1140 .imm = { .min = 0, .max = UINT32_MAX,},
1143 [(BPF_ALU | BPF_DIV | BPF_K)] = {
1144 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1145 .off = { .min = 0, .max = 0},
1146 .imm = { .min = 1, .max = UINT32_MAX},
1149 [(BPF_ALU | BPF_MOD | BPF_K)] = {
1150 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1151 .off = { .min = 0, .max = 0},
1152 .imm = { .min = 1, .max = UINT32_MAX},
1155 /* ALU IMM 64-bit instructions */
1156 [(EBPF_ALU64 | BPF_ADD | BPF_K)] = {
1157 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1158 .off = { .min = 0, .max = 0},
1159 .imm = { .min = 0, .max = UINT32_MAX,},
1162 [(EBPF_ALU64 | BPF_SUB | BPF_K)] = {
1163 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1164 .off = { .min = 0, .max = 0},
1165 .imm = { .min = 0, .max = UINT32_MAX,},
1168 [(EBPF_ALU64 | BPF_AND | BPF_K)] = {
1169 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1170 .off = { .min = 0, .max = 0},
1171 .imm = { .min = 0, .max = UINT32_MAX,},
1174 [(EBPF_ALU64 | BPF_OR | BPF_K)] = {
1175 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1176 .off = { .min = 0, .max = 0},
1177 .imm = { .min = 0, .max = UINT32_MAX,},
1180 [(EBPF_ALU64 | BPF_LSH | BPF_K)] = {
1181 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1182 .off = { .min = 0, .max = 0},
1183 .imm = { .min = 0, .max = UINT32_MAX,},
1186 [(EBPF_ALU64 | BPF_RSH | BPF_K)] = {
1187 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1188 .off = { .min = 0, .max = 0},
1189 .imm = { .min = 0, .max = UINT32_MAX,},
1192 [(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {
1193 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1194 .off = { .min = 0, .max = 0},
1195 .imm = { .min = 0, .max = UINT32_MAX,},
1198 [(EBPF_ALU64 | BPF_XOR | BPF_K)] = {
1199 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1200 .off = { .min = 0, .max = 0},
1201 .imm = { .min = 0, .max = UINT32_MAX,},
1204 [(EBPF_ALU64 | BPF_MUL | BPF_K)] = {
1205 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1206 .off = { .min = 0, .max = 0},
1207 .imm = { .min = 0, .max = UINT32_MAX,},
1210 [(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {
1211 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1212 .off = { .min = 0, .max = 0},
1213 .imm = { .min = 0, .max = UINT32_MAX,},
1216 [(EBPF_ALU64 | BPF_DIV | BPF_K)] = {
1217 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1218 .off = { .min = 0, .max = 0},
1219 .imm = { .min = 1, .max = UINT32_MAX},
1222 [(EBPF_ALU64 | BPF_MOD | BPF_K)] = {
1223 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1224 .off = { .min = 0, .max = 0},
1225 .imm = { .min = 1, .max = UINT32_MAX},
1228 /* ALU REG 32-bit instructions */
1229 [(BPF_ALU | BPF_ADD | BPF_X)] = {
1230 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1231 .off = { .min = 0, .max = 0},
1232 .imm = { .min = 0, .max = 0},
1235 [(BPF_ALU | BPF_SUB | BPF_X)] = {
1236 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1237 .off = { .min = 0, .max = 0},
1238 .imm = { .min = 0, .max = 0},
1241 [(BPF_ALU | BPF_AND | BPF_X)] = {
1242 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1243 .off = { .min = 0, .max = 0},
1244 .imm = { .min = 0, .max = 0},
1247 [(BPF_ALU | BPF_OR | BPF_X)] = {
1248 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1249 .off = { .min = 0, .max = 0},
1250 .imm = { .min = 0, .max = 0},
1253 [(BPF_ALU | BPF_LSH | BPF_X)] = {
1254 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1255 .off = { .min = 0, .max = 0},
1256 .imm = { .min = 0, .max = 0},
1259 [(BPF_ALU | BPF_RSH | BPF_X)] = {
1260 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1261 .off = { .min = 0, .max = 0},
1262 .imm = { .min = 0, .max = 0},
1265 [(BPF_ALU | BPF_XOR | BPF_X)] = {
1266 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1267 .off = { .min = 0, .max = 0},
1268 .imm = { .min = 0, .max = 0},
1271 [(BPF_ALU | BPF_MUL | BPF_X)] = {
1272 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1273 .off = { .min = 0, .max = 0},
1274 .imm = { .min = 0, .max = 0},
1277 [(BPF_ALU | BPF_DIV | BPF_X)] = {
1278 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1279 .off = { .min = 0, .max = 0},
1280 .imm = { .min = 0, .max = 0},
1283 [(BPF_ALU | BPF_MOD | BPF_X)] = {
1284 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1285 .off = { .min = 0, .max = 0},
1286 .imm = { .min = 0, .max = 0},
1289 [(BPF_ALU | EBPF_MOV | BPF_X)] = {
1290 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1291 .off = { .min = 0, .max = 0},
1292 .imm = { .min = 0, .max = 0},
1295 [(BPF_ALU | BPF_NEG)] = {
1296 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1297 .off = { .min = 0, .max = 0},
1298 .imm = { .min = 0, .max = 0},
1301 [(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {
1302 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1303 .off = { .min = 0, .max = 0},
1304 .imm = { .min = 16, .max = 64},
1305 .check = check_alu_bele,
1308 [(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {
1309 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1310 .off = { .min = 0, .max = 0},
1311 .imm = { .min = 16, .max = 64},
1312 .check = check_alu_bele,
1315 /* ALU REG 64-bit instructions */
1316 [(EBPF_ALU64 | BPF_ADD | BPF_X)] = {
1317 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1318 .off = { .min = 0, .max = 0},
1319 .imm = { .min = 0, .max = 0},
1322 [(EBPF_ALU64 | BPF_SUB | BPF_X)] = {
1323 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1324 .off = { .min = 0, .max = 0},
1325 .imm = { .min = 0, .max = 0},
1328 [(EBPF_ALU64 | BPF_AND | BPF_X)] = {
1329 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1330 .off = { .min = 0, .max = 0},
1331 .imm = { .min = 0, .max = 0},
1334 [(EBPF_ALU64 | BPF_OR | BPF_X)] = {
1335 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1336 .off = { .min = 0, .max = 0},
1337 .imm = { .min = 0, .max = 0},
1340 [(EBPF_ALU64 | BPF_LSH | BPF_X)] = {
1341 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1342 .off = { .min = 0, .max = 0},
1343 .imm = { .min = 0, .max = 0},
1346 [(EBPF_ALU64 | BPF_RSH | BPF_X)] = {
1347 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1348 .off = { .min = 0, .max = 0},
1349 .imm = { .min = 0, .max = 0},
1352 [(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {
1353 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1354 .off = { .min = 0, .max = 0},
1355 .imm = { .min = 0, .max = 0},
1358 [(EBPF_ALU64 | BPF_XOR | BPF_X)] = {
1359 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1360 .off = { .min = 0, .max = 0},
1361 .imm = { .min = 0, .max = 0},
1364 [(EBPF_ALU64 | BPF_MUL | BPF_X)] = {
1365 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1366 .off = { .min = 0, .max = 0},
1367 .imm = { .min = 0, .max = 0},
1370 [(EBPF_ALU64 | BPF_DIV | BPF_X)] = {
1371 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1372 .off = { .min = 0, .max = 0},
1373 .imm = { .min = 0, .max = 0},
1376 [(EBPF_ALU64 | BPF_MOD | BPF_X)] = {
1377 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1378 .off = { .min = 0, .max = 0},
1379 .imm = { .min = 0, .max = 0},
1382 [(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {
1383 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1384 .off = { .min = 0, .max = 0},
1385 .imm = { .min = 0, .max = 0},
1388 [(EBPF_ALU64 | BPF_NEG)] = {
1389 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1390 .off = { .min = 0, .max = 0},
1391 .imm = { .min = 0, .max = 0},
1394 /* load instructions */
1395 [(BPF_LDX | BPF_MEM | BPF_B)] = {
1396 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1397 .off = { .min = 0, .max = UINT16_MAX},
1398 .imm = { .min = 0, .max = 0},
1401 [(BPF_LDX | BPF_MEM | BPF_H)] = {
1402 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1403 .off = { .min = 0, .max = UINT16_MAX},
1404 .imm = { .min = 0, .max = 0},
1407 [(BPF_LDX | BPF_MEM | BPF_W)] = {
1408 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1409 .off = { .min = 0, .max = UINT16_MAX},
1410 .imm = { .min = 0, .max = 0},
1413 [(BPF_LDX | BPF_MEM | EBPF_DW)] = {
1414 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1415 .off = { .min = 0, .max = UINT16_MAX},
1416 .imm = { .min = 0, .max = 0},
1419 /* load 64 bit immediate value */
1420 [(BPF_LD | BPF_IMM | EBPF_DW)] = {
1421 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1422 .off = { .min = 0, .max = 0},
1423 .imm = { .min = 0, .max = UINT32_MAX},
1424 .eval = eval_ld_imm64,
1426 /* store REG instructions */
1427 [(BPF_STX | BPF_MEM | BPF_B)] = {
1428 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1429 .off = { .min = 0, .max = UINT16_MAX},
1430 .imm = { .min = 0, .max = 0},
1433 [(BPF_STX | BPF_MEM | BPF_H)] = {
1434 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1435 .off = { .min = 0, .max = UINT16_MAX},
1436 .imm = { .min = 0, .max = 0},
1439 [(BPF_STX | BPF_MEM | BPF_W)] = {
1440 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1441 .off = { .min = 0, .max = UINT16_MAX},
1442 .imm = { .min = 0, .max = 0},
1445 [(BPF_STX | BPF_MEM | EBPF_DW)] = {
1446 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1447 .off = { .min = 0, .max = UINT16_MAX},
1448 .imm = { .min = 0, .max = 0},
1451 /* atomic add instructions */
1452 [(BPF_STX | EBPF_XADD | BPF_W)] = {
1453 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1454 .off = { .min = 0, .max = UINT16_MAX},
1455 .imm = { .min = 0, .max = 0},
1458 [(BPF_STX | EBPF_XADD | EBPF_DW)] = {
1459 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1460 .off = { .min = 0, .max = UINT16_MAX},
1461 .imm = { .min = 0, .max = 0},
1464 /* store IMM instructions */
1465 [(BPF_ST | BPF_MEM | BPF_B)] = {
1466 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1467 .off = { .min = 0, .max = UINT16_MAX},
1468 .imm = { .min = 0, .max = UINT32_MAX},
1471 [(BPF_ST | BPF_MEM | BPF_H)] = {
1472 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1473 .off = { .min = 0, .max = UINT16_MAX},
1474 .imm = { .min = 0, .max = UINT32_MAX},
1477 [(BPF_ST | BPF_MEM | BPF_W)] = {
1478 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1479 .off = { .min = 0, .max = UINT16_MAX},
1480 .imm = { .min = 0, .max = UINT32_MAX},
1483 [(BPF_ST | BPF_MEM | EBPF_DW)] = {
1484 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1485 .off = { .min = 0, .max = UINT16_MAX},
1486 .imm = { .min = 0, .max = UINT32_MAX},
1489 /* jump instruction */
1490 [(BPF_JMP | BPF_JA)] = {
1491 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1492 .off = { .min = 0, .max = UINT16_MAX},
1493 .imm = { .min = 0, .max = 0},
1495 /* jcc IMM instructions */
1496 [(BPF_JMP | BPF_JEQ | BPF_K)] = {
1497 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1498 .off = { .min = 0, .max = UINT16_MAX},
1499 .imm = { .min = 0, .max = UINT32_MAX},
1502 [(BPF_JMP | EBPF_JNE | BPF_K)] = {
1503 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1504 .off = { .min = 0, .max = UINT16_MAX},
1505 .imm = { .min = 0, .max = UINT32_MAX},
1508 [(BPF_JMP | BPF_JGT | BPF_K)] = {
1509 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1510 .off = { .min = 0, .max = UINT16_MAX},
1511 .imm = { .min = 0, .max = UINT32_MAX},
1514 [(BPF_JMP | EBPF_JLT | BPF_K)] = {
1515 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1516 .off = { .min = 0, .max = UINT16_MAX},
1517 .imm = { .min = 0, .max = UINT32_MAX},
1520 [(BPF_JMP | BPF_JGE | BPF_K)] = {
1521 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1522 .off = { .min = 0, .max = UINT16_MAX},
1523 .imm = { .min = 0, .max = UINT32_MAX},
1526 [(BPF_JMP | EBPF_JLE | BPF_K)] = {
1527 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1528 .off = { .min = 0, .max = UINT16_MAX},
1529 .imm = { .min = 0, .max = UINT32_MAX},
1532 [(BPF_JMP | EBPF_JSGT | BPF_K)] = {
1533 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1534 .off = { .min = 0, .max = UINT16_MAX},
1535 .imm = { .min = 0, .max = UINT32_MAX},
1538 [(BPF_JMP | EBPF_JSLT | BPF_K)] = {
1539 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1540 .off = { .min = 0, .max = UINT16_MAX},
1541 .imm = { .min = 0, .max = UINT32_MAX},
1544 [(BPF_JMP | EBPF_JSGE | BPF_K)] = {
1545 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1546 .off = { .min = 0, .max = UINT16_MAX},
1547 .imm = { .min = 0, .max = UINT32_MAX},
1550 [(BPF_JMP | EBPF_JSLE | BPF_K)] = {
1551 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1552 .off = { .min = 0, .max = UINT16_MAX},
1553 .imm = { .min = 0, .max = UINT32_MAX},
1556 [(BPF_JMP | BPF_JSET | BPF_K)] = {
1557 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1558 .off = { .min = 0, .max = UINT16_MAX},
1559 .imm = { .min = 0, .max = UINT32_MAX},
1562 /* jcc REG instructions */
1563 [(BPF_JMP | BPF_JEQ | BPF_X)] = {
1564 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1565 .off = { .min = 0, .max = UINT16_MAX},
1566 .imm = { .min = 0, .max = 0},
1569 [(BPF_JMP | EBPF_JNE | BPF_X)] = {
1570 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1571 .off = { .min = 0, .max = UINT16_MAX},
1572 .imm = { .min = 0, .max = 0},
1575 [(BPF_JMP | BPF_JGT | BPF_X)] = {
1576 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1577 .off = { .min = 0, .max = UINT16_MAX},
1578 .imm = { .min = 0, .max = 0},
1581 [(BPF_JMP | EBPF_JLT | BPF_X)] = {
1582 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1583 .off = { .min = 0, .max = UINT16_MAX},
1584 .imm = { .min = 0, .max = 0},
1587 [(BPF_JMP | BPF_JGE | BPF_X)] = {
1588 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1589 .off = { .min = 0, .max = UINT16_MAX},
1590 .imm = { .min = 0, .max = 0},
1593 [(BPF_JMP | EBPF_JLE | BPF_X)] = {
1594 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1595 .off = { .min = 0, .max = UINT16_MAX},
1596 .imm = { .min = 0, .max = 0},
1599 [(BPF_JMP | EBPF_JSGT | BPF_X)] = {
1600 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1601 .off = { .min = 0, .max = UINT16_MAX},
1602 .imm = { .min = 0, .max = 0},
1605 [(BPF_JMP | EBPF_JSLT | BPF_X)] = {
1606 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1607 .off = { .min = 0, .max = UINT16_MAX},
1608 .imm = { .min = 0, .max = 0},
1610 [(BPF_JMP | EBPF_JSGE | BPF_X)] = {
1611 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1612 .off = { .min = 0, .max = UINT16_MAX},
1613 .imm = { .min = 0, .max = 0},
1616 [(BPF_JMP | EBPF_JSLE | BPF_X)] = {
1617 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1618 .off = { .min = 0, .max = UINT16_MAX},
1619 .imm = { .min = 0, .max = 0},
1622 [(BPF_JMP | BPF_JSET | BPF_X)] = {
1623 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1624 .off = { .min = 0, .max = UINT16_MAX},
1625 .imm = { .min = 0, .max = 0},
1628 /* call instruction */
1629 [(BPF_JMP | EBPF_CALL)] = {
1630 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1631 .off = { .min = 0, .max = 0},
1632 .imm = { .min = 0, .max = UINT32_MAX},
1635 /* ret instruction */
1636 [(BPF_JMP | EBPF_EXIT)] = {
1637 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1638 .off = { .min = 0, .max = 0},
1639 .imm = { .min = 0, .max = 0},
1645 * make sure that instruction syntax is valid,
1646 * and its fields don't violate particular instruction type restrictions.
1649 check_syntax(const struct ebpf_insn *ins)
1658 if (ins_chk[op].mask.dreg == 0)
1659 return "invalid opcode";
1661 if ((ins_chk[op].mask.dreg & 1 << ins->dst_reg) == 0)
1662 return "invalid dst-reg field";
1664 if ((ins_chk[op].mask.sreg & 1 << ins->src_reg) == 0)
1665 return "invalid src-reg field";
1668 if (ins_chk[op].off.min > off || ins_chk[op].off.max < off)
1669 return "invalid off field";
1672 if (ins_chk[op].imm.min > imm || ins_chk[op].imm.max < imm)
1673 return "invalid imm field";
1675 if (ins_chk[op].check != NULL)
1676 return ins_chk[op].check(ins);
1682 * helper function, return instruction index for the given node.
1685 get_node_idx(const struct bpf_verifier *bvf, const struct inst_node *node)
1687 return node - bvf->in;
1691 * helper function, used to walk through constructed CFG.
1693 static struct inst_node *
1694 get_next_node(struct bpf_verifier *bvf, struct inst_node *node)
1696 uint32_t ce, ne, dst;
1699 ce = node->cur_edge;
1704 dst = node->edge_dest[ce];
1705 return bvf->in + dst;
1709 set_node_colour(struct bpf_verifier *bvf, struct inst_node *node,
1714 prev = node->colour;
1717 bvf->node_colour[prev]--;
1718 bvf->node_colour[new]++;
1722 * helper function, add new edge between two nodes.
1725 add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
1729 if (nidx > bvf->prm->nb_ins) {
1730 RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, "
1732 __func__, get_node_idx(bvf, node), nidx);
1737 if (ne >= RTE_DIM(node->edge_dest)) {
1738 RTE_BPF_LOG(ERR, "%s: internal error at pc: %u\n",
1739 __func__, get_node_idx(bvf, node));
1743 node->edge_dest[ne] = nidx;
1744 node->nb_edge = ne + 1;
1749 * helper function, determine type of edge between two nodes.
1752 set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
1753 const struct inst_node *next)
1755 uint32_t ce, clr, type;
1757 ce = node->cur_edge - 1;
1760 type = UNKNOWN_EDGE;
1764 else if (clr == GREY)
1766 else if (clr == BLACK)
1768 * in fact it could be either a forward or a cross edge,
1769 * but for now, we don't need to distinguish between them.
1773 node->edge_type[ce] = type;
1774 bvf->edge_type[type]++;
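/*
 * classification follows the usual DFS scheme: an edge to a WHITE node
 * is a tree edge, to a GREY node (still on the DFS path) a back edge,
 * to a BLACK node a cross/forward edge. Any BACK_EDGE recorded here
 * makes validate() reject the program as containing a loop.
 */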
1777 static struct inst_node *
1778 get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
1780 return bvf->in + node->prev_node;
1784 * Depth-First Search (DFS) through previously constructed
1785 * Control Flow Graph (CFG).
1786 * Information collected along this pass will be used later
1787 * to determine whether there are any loops and/or unreachable instructions.
1790 dfs(struct bpf_verifier *bvf)
1792 struct inst_node *next, *node;
1795 while (node != NULL) {
1797 if (node->colour == WHITE)
1798 set_node_colour(bvf, node, GREY);
1800 if (node->colour == GREY) {
1802 /* find next unprocessed child node */
1804 next = get_next_node(bvf, node);
1807 set_edge_type(bvf, node, next);
1808 } while (next->colour != WHITE);
1811 /* proceed with next child */
1812 next->prev_node = get_node_idx(bvf, node);
1816 * finished with the current node and all its children,
1817 * proceed with the parent
1819 set_node_colour(bvf, node, BLACK);
1821 node = get_prev_node(bvf, node);
1829 * report unreachable instructions.
1832 log_unreachable(const struct bpf_verifier *bvf)
1835 struct inst_node *node;
1836 const struct ebpf_insn *ins;
1838 for (i = 0; i != bvf->prm->nb_ins; i++) {
1841 ins = bvf->prm->ins + i;
1843 if (node->colour == WHITE &&
1844 ins->code != (BPF_LD | BPF_IMM | EBPF_DW))
1845 RTE_BPF_LOG(ERR, "unreachable code at pc: %u;\n", i);
1850 * report loops detected.
1853 log_loop(const struct bpf_verifier *bvf)
1856 struct inst_node *node;
1858 for (i = 0; i != bvf->prm->nb_ins; i++) {
1861 if (node->colour != BLACK)
1864 for (j = 0; j != node->nb_edge; j++) {
1865 if (node->edge_type[j] == BACK_EDGE)
1867 "loop at pc:%u --> pc:%u;\n",
1868 i, node->edge_dest[j]);
1874 * First pass goes through all instructions in the set, checks that each
1875 * instruction is a valid one (correct syntax, valid field values, etc.)
1876 * and constructs control flow graph (CFG).
1877 * Then a depth-first search is performed over the constructed graph.
1878 * Programs with unreachable instructions and/or loops will be rejected.
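 * (unreachable code shows up as nodes that are still WHITE after the
 * DFS, loops as BACK_EDGE entries collected by set_edge_type()).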
1881 validate(struct bpf_verifier *bvf)
1885 struct inst_node *node;
1886 const struct ebpf_insn *ins;
1890 for (i = 0; i < bvf->prm->nb_ins; i++) {
1892 ins = bvf->prm->ins + i;
1895 err = check_syntax(ins);
1897 RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
1903 * construct CFG: jcc nodes have two outgoing edges,
1904 * 'exit' nodes have none, all other nodes have exactly one
1907 switch (ins->code) {
1908 case (BPF_JMP | EBPF_EXIT):
1910 case (BPF_JMP | BPF_JEQ | BPF_K):
1911 case (BPF_JMP | EBPF_JNE | BPF_K):
1912 case (BPF_JMP | BPF_JGT | BPF_K):
1913 case (BPF_JMP | EBPF_JLT | BPF_K):
1914 case (BPF_JMP | BPF_JGE | BPF_K):
1915 case (BPF_JMP | EBPF_JLE | BPF_K):
1916 case (BPF_JMP | EBPF_JSGT | BPF_K):
1917 case (BPF_JMP | EBPF_JSLT | BPF_K):
1918 case (BPF_JMP | EBPF_JSGE | BPF_K):
1919 case (BPF_JMP | EBPF_JSLE | BPF_K):
1920 case (BPF_JMP | BPF_JSET | BPF_K):
1921 case (BPF_JMP | BPF_JEQ | BPF_X):
1922 case (BPF_JMP | EBPF_JNE | BPF_X):
1923 case (BPF_JMP | BPF_JGT | BPF_X):
1924 case (BPF_JMP | EBPF_JLT | BPF_X):
1925 case (BPF_JMP | BPF_JGE | BPF_X):
1926 case (BPF_JMP | EBPF_JLE | BPF_X):
1927 case (BPF_JMP | EBPF_JSGT | BPF_X):
1928 case (BPF_JMP | EBPF_JSLT | BPF_X):
1929 case (BPF_JMP | EBPF_JSGE | BPF_X):
1930 case (BPF_JMP | EBPF_JSLE | BPF_X):
1931 case (BPF_JMP | BPF_JSET | BPF_X):
1932 rc |= add_edge(bvf, node, i + ins->off + 1);
1933 rc |= add_edge(bvf, node, i + 1);
1934 bvf->nb_jcc_nodes++;
1936 case (BPF_JMP | BPF_JA):
1937 rc |= add_edge(bvf, node, i + ins->off + 1);
1939 /* load 64 bit immediate value */
1940 case (BPF_LD | BPF_IMM | EBPF_DW):
1941 rc |= add_edge(bvf, node, i + 2);
1945 rc |= add_edge(bvf, node, i + 1);
1950 bvf->node_colour[WHITE]++;
1958 RTE_BPF_LOG(DEBUG, "%s(%p) stats:\n"
1960 "nb_jcc_nodes=%u;\n"
1961 "node_color={[WHITE]=%u, [GREY]=%u,, [BLACK]=%u};\n"
1962 "edge_type={[UNKNOWN]=%u, [TREE]=%u, [BACK]=%u, [CROSS]=%u};\n",
1966 bvf->node_colour[WHITE], bvf->node_colour[GREY],
1967 bvf->node_colour[BLACK],
1968 bvf->edge_type[UNKNOWN_EDGE], bvf->edge_type[TREE_EDGE],
1969 bvf->edge_type[BACK_EDGE], bvf->edge_type[CROSS_EDGE]);
1971 if (bvf->node_colour[BLACK] != bvf->nb_nodes) {
1972 RTE_BPF_LOG(ERR, "%s(%p) unreachable instructions;\n",
1974 log_unreachable(bvf);
1978 if (bvf->node_colour[GREY] != 0 || bvf->node_colour[WHITE] != 0 ||
1979 bvf->edge_type[UNKNOWN_EDGE] != 0) {
1980 RTE_BPF_LOG(ERR, "%s(%p) DFS internal error;\n",
1985 if (bvf->edge_type[BACK_EDGE] != 0) {
1986 RTE_BPF_LOG(ERR, "%s(%p) loops detected;\n",
1996 * helper functions to get/free eval states.
1998 static struct bpf_eval_state *
1999 pull_eval_state(struct bpf_verifier *bvf)
2003 n = bvf->evst_pool.cur;
2004 if (n == bvf->evst_pool.num)
2007 bvf->evst_pool.cur = n + 1;
2008 return bvf->evst_pool.ent + n;
2012 push_eval_state(struct bpf_verifier *bvf)
2014 bvf->evst_pool.cur--;
2018 evst_pool_fini(struct bpf_verifier *bvf)
2021 free(bvf->evst_pool.ent);
2022 memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool));
2026 evst_pool_init(struct bpf_verifier *bvf)
2030 n = bvf->nb_jcc_nodes + 1;
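/*
 * at most one saved state is held per conditional-jump node on the
 * current evaluation path (save_eval_state() is called only for nodes
 * with more than one outgoing edge), plus one for the current working
 * state, hence nb_jcc_nodes + 1 entries are sufficient.
 */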
2032 bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0]));
2033 if (bvf->evst_pool.ent == NULL)
2036 bvf->evst_pool.num = n;
2037 bvf->evst_pool.cur = 0;
2039 bvf->evst = pull_eval_state(bvf);
2044 * Save current eval state.
2047 save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2049 struct bpf_eval_state *st;
2051 /* get new eval_state for this node */
2052 st = pull_eval_state(bvf);
2055 "%s: internal error (out of space) at pc: %u\n",
2056 __func__, get_node_idx(bvf, node));
2060 /* make a copy of current state */
2061 memcpy(st, bvf->evst, sizeof(*st));
2063 /* swap current state with new one */
2064 node->evst = bvf->evst;
2067 RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
2068 __func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);
2074 * Restore previous eval state and mark current eval state as free.
2077 restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2079 RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
2080 __func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);
2082 bvf->evst = node->evst;
2084 push_eval_state(bvf);
2088 log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
2089 uint32_t pc, int32_t loglvl)
2091 const struct bpf_eval_state *st;
2092 const struct bpf_reg_val *rv;
2094 rte_log(loglvl, rte_bpf_logtype, "%s(pc=%u):\n", __func__, pc);
2097 rv = st->rv + ins->dst_reg;
2099 rte_log(loglvl, rte_bpf_logtype,
2101 "\tv={type=%u, size=%zu},\n"
2102 "\tmask=0x%" PRIx64 ",\n"
2103 "\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n"
2104 "\ts={min=%" PRId64 ", max=%" PRId64 "},\n"
2107 rv->v.type, rv->v.size,
2109 rv->u.min, rv->u.max,
2110 rv->s.min, rv->s.max);
2114 * Do second pass through CFG and try to evaluate instructions
2115 * via each possible path.
2116 * Right now evaluation functionality is quite limited.
2117 * Extra checks still need to be added for:
2118 * - use/return of uninitialized registers.
2119 * - use of uninitialized data from the stack.
2120 * - memory boundary violations.
2123 evaluate(struct bpf_verifier *bvf)
2128 const struct ebpf_insn *ins;
2129 struct inst_node *next, *node;
2131 /* initial state of frame pointer */
2132 static const struct bpf_reg_val rvfp = {
2134 .type = RTE_BPF_ARG_PTR_STACK,
2135 .size = MAX_BPF_STACK_SIZE,
2138 .u = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
2139 .s = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
2142 bvf->evst->rv[EBPF_REG_1].v = bvf->prm->prog_arg;
2143 bvf->evst->rv[EBPF_REG_1].mask = UINT64_MAX;
2144 if (bvf->prm->prog_arg.type == RTE_BPF_ARG_RAW)
2145 eval_max_bound(bvf->evst->rv + EBPF_REG_1, UINT64_MAX);
2147 bvf->evst->rv[EBPF_REG_10] = rvfp;
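/*
 * R10 is the read-only frame pointer: it starts at the upper boundary
 * of the MAX_BPF_STACK_SIZE byte stack area, so valid stack accesses
 * use negative offsets from it (see eval_ptr()).
 */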
2149 ins = bvf->prm->ins;
2154 while (node != NULL && rc == 0) {
2157 * current node evaluation, make sure we evaluate
2158 * each node only once.
2163 idx = get_node_idx(bvf, node);
2166 /* for a jcc node, make a copy of the evaluation state */
2167 if (node->nb_edge > 1)
2168 rc |= save_eval_state(bvf, node);
2170 if (ins_chk[op].eval != NULL && rc == 0) {
2171 err = ins_chk[op].eval(bvf, ins + idx);
2173 RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
2174 __func__, err, idx);
2179 log_eval_state(bvf, ins + idx, idx, RTE_LOG_DEBUG);
2183 /* proceed through CFG */
2184 next = get_next_node(bvf, node);
2187 /* proceed with next child */
2188 if (node->cur_edge == node->nb_edge &&
2190 restore_eval_state(bvf, node);
2192 next->prev_node = get_node_idx(bvf, node);
2196 * finished with the current node and all its children,
2197 * proceed with the parent
2200 node = get_prev_node(bvf, node);
2203 if (node == bvf->in)
2212 bpf_validate(struct rte_bpf *bpf)
2215 struct bpf_verifier bvf;
2217 /* check input argument type, don't allow mbuf ptr on 32-bit */
2218 if (bpf->prm.prog_arg.type != RTE_BPF_ARG_RAW &&
2219 bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR &&
2220 (sizeof(uint64_t) != sizeof(uintptr_t) ||
2221 bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR_MBUF)) {
2222 RTE_BPF_LOG(ERR, "%s: unsupported argument type\n", __func__);
2226 memset(&bvf, 0, sizeof(bvf));
2227 bvf.prm = &bpf->prm;
2228 bvf.in = calloc(bpf->prm.nb_ins, sizeof(bvf.in[0]));
2232 rc = validate(&bvf);
2235 rc = evst_pool_init(&bvf);
2237 rc = evaluate(&bvf);
2238 evst_pool_fini(&bvf);
2243 /* copy collected info */
2245 bpf->stack_sz = bvf.stack_sz;