1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
12 #include <rte_common.h>
14 #include <rte_byteorder.h>
18 #define BPF_ARG_PTR_STACK RTE_BPF_ARG_RESERVED
33 struct bpf_eval_state {
34 struct bpf_reg_val rv[EBPF_REG_NUM];
35 struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
38 /* possible instruction node colour */
46 /* possible edge types */
61 uint8_t edge_type[MAX_EDGES];
62 uint32_t edge_dest[MAX_EDGES];
64 struct bpf_eval_state *evst;
68 const struct rte_bpf_prm *prm;
72 uint32_t nb_jcc_nodes;
73 uint32_t node_colour[MAX_NODE_COLOUR];
74 uint32_t edge_type[MAX_EDGE_TYPE];
75 struct bpf_eval_state *evst;
76 struct inst_node *evin;
80 struct bpf_eval_state *ent;
84 struct bpf_ins_check {
97 const char * (*check)(const struct ebpf_insn *);
98 const char * (*eval)(struct bpf_verifier *, const struct ebpf_insn *);
101 #define ALL_REGS RTE_LEN2MASK(EBPF_REG_NUM, uint16_t)
102 #define WRT_REGS RTE_LEN2MASK(EBPF_REG_10, uint16_t)
103 #define ZERO_REG RTE_LEN2MASK(EBPF_REG_1, uint16_t)
105 /* For LD_IND R6 is an implicit CTX register. */
106 #define IND_SRC_REGS (WRT_REGS ^ 1 << EBPF_REG_6)
109 * check and evaluate functions for particular instruction types.
113 check_alu_bele(const struct ebpf_insn *ins)
115 if (ins->imm != 16 && ins->imm != 32 && ins->imm != 64)
116 return "invalid imm field";
121 eval_exit(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
124 if (bvf->evst->rv[EBPF_REG_0].v.type == RTE_BPF_ARG_UNDEF)
125 return "undefined return value";
129 /* setup max possible with this mask bounds */
131 eval_umax_bound(struct bpf_reg_val *rv, uint64_t mask)
138 eval_smax_bound(struct bpf_reg_val *rv, uint64_t mask)
140 rv->s.max = mask >> 1;
141 rv->s.min = rv->s.max ^ UINT64_MAX;
145 eval_max_bound(struct bpf_reg_val *rv, uint64_t mask)
147 eval_umax_bound(rv, mask);
148 eval_smax_bound(rv, mask);
152 eval_fill_max_bound(struct bpf_reg_val *rv, uint64_t mask)
154 eval_max_bound(rv, mask);
155 rv->v.type = RTE_BPF_ARG_RAW;
160 eval_fill_imm64(struct bpf_reg_val *rv, uint64_t mask, uint64_t val)
170 eval_fill_imm(struct bpf_reg_val *rv, uint64_t mask, int32_t imm)
174 v = (uint64_t)imm & mask;
176 rv->v.type = RTE_BPF_ARG_RAW;
177 eval_fill_imm64(rv, mask, v);
181 eval_ld_imm64(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
185 struct bpf_reg_val *rd;
187 val = (uint32_t)ins[0].imm | (uint64_t)(uint32_t)ins[1].imm << 32;
189 rd = bvf->evst->rv + ins->dst_reg;
190 rd->v.type = RTE_BPF_ARG_RAW;
191 eval_fill_imm64(rd, UINT64_MAX, val);
193 for (i = 0; i != bvf->prm->nb_xsym; i++) {
195 /* load of external variable */
196 if (bvf->prm->xsym[i].type == RTE_BPF_XTYPE_VAR &&
197 (uintptr_t)bvf->prm->xsym[i].var.val == val) {
198 rd->v = bvf->prm->xsym[i].var.desc;
199 eval_fill_imm64(rd, UINT64_MAX, 0);
208 eval_apply_mask(struct bpf_reg_val *rv, uint64_t mask)
210 struct bpf_reg_val rt;
212 rt.u.min = rv->u.min & mask;
213 rt.u.max = rv->u.max & mask;
214 if (rt.u.min != rv->u.min || rt.u.max != rv->u.max) {
215 rv->u.max = RTE_MAX(rt.u.max, mask);
219 eval_smax_bound(&rt, mask);
220 rv->s.max = RTE_MIN(rt.s.max, rv->s.max);
221 rv->s.min = RTE_MAX(rt.s.min, rv->s.min);
227 eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
229 struct bpf_reg_val rv;
231 rv.u.min = (rd->u.min + rs->u.min) & msk;
232 rv.u.max = (rd->u.max + rs->u.max) & msk;
233 rv.s.min = (rd->s.min + rs->s.min) & msk;
234 rv.s.max = (rd->s.max + rs->s.max) & msk;
237 * if at least one of the operands is not constant,
238 * then check for overflow
240 if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
241 (rv.u.min < rd->u.min || rv.u.max < rd->u.max))
242 eval_umax_bound(&rv, msk);
244 if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
245 (((rs->s.min < 0 && rv.s.min > rd->s.min) ||
246 rv.s.min < rd->s.min) ||
247 ((rs->s.max < 0 && rv.s.max > rd->s.max) ||
248 rv.s.max < rd->s.max)))
249 eval_smax_bound(&rv, msk);
256 eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
258 struct bpf_reg_val rv;
260 rv.u.min = (rd->u.min - rs->u.max) & msk;
261 rv.u.max = (rd->u.max - rs->u.min) & msk;
262 rv.s.min = (rd->s.min - rs->s.max) & msk;
263 rv.s.max = (rd->s.max - rs->s.min) & msk;
266 * if at least one of the operands is not constant,
267 * then check for overflow
269 if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
270 (rv.u.min > rd->u.min || rv.u.max > rd->u.max))
271 eval_umax_bound(&rv, msk);
273 if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
274 (((rs->s.min < 0 && rv.s.min < rd->s.min) ||
275 rv.s.min > rd->s.min) ||
276 ((rs->s.max < 0 && rv.s.max < rd->s.max) ||
277 rv.s.max > rd->s.max)))
278 eval_smax_bound(&rv, msk);
285 eval_lsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
288 /* check if shift value is less then max result bits */
289 if (rs->u.max >= opsz) {
290 eval_max_bound(rd, msk);
294 /* check for overflow */
295 if (rd->u.max > RTE_LEN2MASK(opsz - rs->u.max, uint64_t))
296 eval_umax_bound(rd, msk);
298 rd->u.max <<= rs->u.max;
299 rd->u.min <<= rs->u.min;
302 /* check that dreg values are and would remain always positive */
303 if ((uint64_t)rd->s.min >> (opsz - 1) != 0 || rd->s.max >=
304 RTE_LEN2MASK(opsz - rs->u.max - 1, int64_t))
305 eval_smax_bound(rd, msk);
307 rd->s.max <<= rs->u.max;
308 rd->s.min <<= rs->u.min;
313 eval_rsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
316 /* check if shift value is less then max result bits */
317 if (rs->u.max >= opsz) {
318 eval_max_bound(rd, msk);
322 rd->u.max >>= rs->u.min;
323 rd->u.min >>= rs->u.max;
325 /* check that dreg values are always positive */
326 if ((uint64_t)rd->s.min >> (opsz - 1) != 0)
327 eval_smax_bound(rd, msk);
329 rd->s.max >>= rs->u.min;
330 rd->s.min >>= rs->u.max;
335 eval_arsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
340 /* check if shift value is less then max result bits */
341 if (rs->u.max >= opsz) {
342 eval_max_bound(rd, msk);
346 rd->u.max = (int64_t)rd->u.max >> rs->u.min;
347 rd->u.min = (int64_t)rd->u.min >> rs->u.max;
349 /* if we have 32-bit values - extend them to 64-bit */
350 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
358 rd->s.min = (rd->s.min >> (rs->u.min + shv)) & msk;
360 rd->s.min = (rd->s.min >> (rs->u.max + shv)) & msk;
363 rd->s.max = (rd->s.max >> (rs->u.max + shv)) & msk;
365 rd->s.max = (rd->s.max >> (rs->u.min + shv)) & msk;
369 eval_umax_bits(uint64_t v, size_t opsz)
374 v = __builtin_clzll(v);
375 return RTE_LEN2MASK(opsz - v, uint64_t);
378 /* estimate max possible value for (v1 & v2) */
380 eval_uand_max(uint64_t v1, uint64_t v2, size_t opsz)
382 v1 = eval_umax_bits(v1, opsz);
383 v2 = eval_umax_bits(v2, opsz);
387 /* estimate max possible value for (v1 | v2) */
389 eval_uor_max(uint64_t v1, uint64_t v2, size_t opsz)
391 v1 = eval_umax_bits(v1, opsz);
392 v2 = eval_umax_bits(v2, opsz);
397 eval_and(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
400 /* both operands are constants */
401 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
402 rd->u.min &= rs->u.min;
403 rd->u.max &= rs->u.max;
405 rd->u.max = eval_uand_max(rd->u.max, rs->u.max, opsz);
406 rd->u.min &= rs->u.min;
409 /* both operands are constants */
410 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
411 rd->s.min &= rs->s.min;
412 rd->s.max &= rs->s.max;
413 /* at least one of operand is non-negative */
414 } else if (rd->s.min >= 0 || rs->s.min >= 0) {
415 rd->s.max = eval_uand_max(rd->s.max & (msk >> 1),
416 rs->s.max & (msk >> 1), opsz);
417 rd->s.min &= rs->s.min;
419 eval_smax_bound(rd, msk);
423 eval_or(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
426 /* both operands are constants */
427 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
428 rd->u.min |= rs->u.min;
429 rd->u.max |= rs->u.max;
431 rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
432 rd->u.min |= rs->u.min;
435 /* both operands are constants */
436 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
437 rd->s.min |= rs->s.min;
438 rd->s.max |= rs->s.max;
440 /* both operands are non-negative */
441 } else if (rd->s.min >= 0 || rs->s.min >= 0) {
442 rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
443 rd->s.min |= rs->s.min;
445 eval_smax_bound(rd, msk);
449 eval_xor(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
452 /* both operands are constants */
453 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
454 rd->u.min ^= rs->u.min;
455 rd->u.max ^= rs->u.max;
457 rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
461 /* both operands are constants */
462 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
463 rd->s.min ^= rs->s.min;
464 rd->s.max ^= rs->s.max;
466 /* both operands are non-negative */
467 } else if (rd->s.min >= 0 || rs->s.min >= 0) {
468 rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
471 eval_smax_bound(rd, msk);
475 eval_mul(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
478 /* both operands are constants */
479 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
480 rd->u.min = (rd->u.min * rs->u.min) & msk;
481 rd->u.max = (rd->u.max * rs->u.max) & msk;
482 /* check for overflow */
483 } else if (rd->u.max <= msk >> opsz / 2 && rs->u.max <= msk >> opsz) {
484 rd->u.max *= rs->u.max;
485 rd->u.min *= rd->u.min;
487 eval_umax_bound(rd, msk);
489 /* both operands are constants */
490 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
491 rd->s.min = (rd->s.min * rs->s.min) & msk;
492 rd->s.max = (rd->s.max * rs->s.max) & msk;
493 /* check that both operands are positive and no overflow */
494 } else if (rd->s.min >= 0 && rs->s.min >= 0) {
495 rd->s.max *= rs->s.max;
496 rd->s.min *= rd->s.min;
498 eval_smax_bound(rd, msk);
502 eval_divmod(uint32_t op, struct bpf_reg_val *rd, struct bpf_reg_val *rs,
503 size_t opsz, uint64_t msk)
505 /* both operands are constants */
506 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
508 return "division by 0";
510 rd->u.min /= rs->u.min;
511 rd->u.max /= rs->u.max;
513 rd->u.min %= rs->u.min;
514 rd->u.max %= rs->u.max;
518 rd->u.max = RTE_MIN(rd->u.max, rs->u.max - 1);
520 rd->u.max = rd->u.max;
524 /* if we have 32-bit values - extend them to 64-bit */
525 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
526 rd->s.min = (int32_t)rd->s.min;
527 rd->s.max = (int32_t)rd->s.max;
528 rs->s.min = (int32_t)rs->s.min;
529 rs->s.max = (int32_t)rs->s.max;
532 /* both operands are constants */
533 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
535 return "division by 0";
537 rd->s.min /= rs->s.min;
538 rd->s.max /= rs->s.max;
540 rd->s.min %= rs->s.min;
541 rd->s.max %= rs->s.max;
543 } else if (op == BPF_MOD) {
544 rd->s.min = RTE_MAX(rd->s.max, 0);
545 rd->s.min = RTE_MIN(rd->s.min, 0);
547 eval_smax_bound(rd, msk);
556 eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
561 /* if we have 32-bit values - extend them to 64-bit */
562 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
563 rd->u.min = (int32_t)rd->u.min;
564 rd->u.max = (int32_t)rd->u.max;
567 ux = -(int64_t)rd->u.min & msk;
568 uy = -(int64_t)rd->u.max & msk;
570 rd->u.max = RTE_MAX(ux, uy);
571 rd->u.min = RTE_MIN(ux, uy);
573 /* if we have 32-bit values - extend them to 64-bit */
574 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
575 rd->s.min = (int32_t)rd->s.min;
576 rd->s.max = (int32_t)rd->s.max;
579 sx = -rd->s.min & msk;
580 sy = -rd->s.max & msk;
582 rd->s.max = RTE_MAX(sx, sy);
583 rd->s.min = RTE_MIN(sx, sy);
587 eval_ld_mbuf(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
590 struct bpf_reg_val *rv, ri, rs;
592 mode = BPF_MODE(ins->code);
594 /* R6 is an implicit input that must contain pointer to mbuf */
595 if (bvf->evst->rv[EBPF_REG_6].v.type != RTE_BPF_ARG_PTR_MBUF)
596 return "invalid type for implicit ctx register";
598 if (mode == BPF_IND) {
599 rs = bvf->evst->rv[ins->src_reg];
600 if (rs.v.type != RTE_BPF_ARG_RAW)
601 return "unexpected type for src register";
603 eval_fill_imm(&ri, UINT64_MAX, ins->imm);
604 eval_add(&rs, &ri, UINT64_MAX);
606 if (rs.s.max < 0 || rs.u.min > UINT32_MAX)
607 return "mbuf boundary violation";
610 /* R1-R5 scratch registers */
611 for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
612 bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
614 /* R0 is an implicit output, contains data fetched from the packet */
615 rv = bvf->evst->rv + EBPF_REG_0;
616 rv->v.size = bpf_size(BPF_SIZE(ins->code));
617 eval_fill_max_bound(rv, RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
623 * check that destination and source operand are in defined state.
626 eval_defined(const struct bpf_reg_val *dst, const struct bpf_reg_val *src)
628 if (dst != NULL && dst->v.type == RTE_BPF_ARG_UNDEF)
629 return "dest reg value is undefined";
630 if (src != NULL && src->v.type == RTE_BPF_ARG_UNDEF)
631 return "src reg value is undefined";
636 eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
642 struct bpf_eval_state *st;
643 struct bpf_reg_val *rd, rs;
645 opsz = (BPF_CLASS(ins->code) == BPF_ALU) ?
646 sizeof(uint32_t) : sizeof(uint64_t);
647 opsz = opsz * CHAR_BIT;
648 msk = RTE_LEN2MASK(opsz, uint64_t);
651 rd = st->rv + ins->dst_reg;
653 if (BPF_SRC(ins->code) == BPF_X) {
654 rs = st->rv[ins->src_reg];
655 eval_apply_mask(&rs, msk);
657 eval_fill_imm(&rs, msk, ins->imm);
659 eval_apply_mask(rd, msk);
661 op = BPF_OP(ins->code);
663 err = eval_defined((op != EBPF_MOV) ? rd : NULL,
664 (op != BPF_NEG) ? &rs : NULL);
669 eval_add(rd, &rs, msk);
670 else if (op == BPF_SUB)
671 eval_sub(rd, &rs, msk);
672 else if (op == BPF_LSH)
673 eval_lsh(rd, &rs, opsz, msk);
674 else if (op == BPF_RSH)
675 eval_rsh(rd, &rs, opsz, msk);
676 else if (op == EBPF_ARSH)
677 eval_arsh(rd, &rs, opsz, msk);
678 else if (op == BPF_AND)
679 eval_and(rd, &rs, opsz, msk);
680 else if (op == BPF_OR)
681 eval_or(rd, &rs, opsz, msk);
682 else if (op == BPF_XOR)
683 eval_xor(rd, &rs, opsz, msk);
684 else if (op == BPF_MUL)
685 eval_mul(rd, &rs, opsz, msk);
686 else if (op == BPF_DIV || op == BPF_MOD)
687 err = eval_divmod(op, rd, &rs, opsz, msk);
688 else if (op == BPF_NEG)
689 eval_neg(rd, opsz, msk);
690 else if (op == EBPF_MOV)
693 eval_max_bound(rd, msk);
699 eval_bele(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
702 struct bpf_eval_state *st;
703 struct bpf_reg_val *rd;
706 msk = RTE_LEN2MASK(ins->imm, uint64_t);
709 rd = st->rv + ins->dst_reg;
711 err = eval_defined(rd, NULL);
715 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
716 if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_BE))
717 eval_max_bound(rd, msk);
719 eval_apply_mask(rd, msk);
721 if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_LE))
722 eval_max_bound(rd, msk);
724 eval_apply_mask(rd, msk);
731 eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,
732 uint32_t align, int16_t off)
734 struct bpf_reg_val rv;
736 /* calculate reg + offset */
737 eval_fill_imm(&rv, rm->mask, off);
738 eval_add(rm, &rv, rm->mask);
740 if (RTE_BPF_ARG_PTR_TYPE(rm->v.type) == 0)
741 return "destination is not a pointer";
743 if (rm->mask != UINT64_MAX)
744 return "pointer truncation";
746 if (rm->u.max + opsz > rm->v.size ||
747 (uint64_t)rm->s.max + opsz > rm->v.size ||
749 return "memory boundary violation";
751 if (rm->u.max % align != 0)
752 return "unaligned memory access";
754 if (rm->v.type == BPF_ARG_PTR_STACK) {
756 if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
757 rm->u.max != (uint64_t)rm->s.max)
758 return "stack access with variable offset";
760 bvf->stack_sz = RTE_MAX(bvf->stack_sz, rm->v.size - rm->u.max);
762 /* pointer to mbuf */
763 } else if (rm->v.type == RTE_BPF_ARG_PTR_MBUF) {
765 if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
766 rm->u.max != (uint64_t)rm->s.max)
767 return "mbuf access with variable offset";
774 eval_max_load(struct bpf_reg_val *rv, uint64_t mask)
776 eval_umax_bound(rv, mask);
778 /* full 64-bit load */
779 if (mask == UINT64_MAX)
780 eval_smax_bound(rv, mask);
782 /* zero-extend load */
783 rv->s.min = rv->u.min;
784 rv->s.max = rv->u.max;
789 eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
794 struct bpf_eval_state *st;
795 struct bpf_reg_val *rd, rs;
796 const struct bpf_reg_val *sv;
799 rd = st->rv + ins->dst_reg;
800 rs = st->rv[ins->src_reg];
801 opsz = bpf_size(BPF_SIZE(ins->code));
802 msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
804 err = eval_ptr(bvf, &rs, opsz, 1, ins->off);
808 if (rs.v.type == BPF_ARG_PTR_STACK) {
810 sv = st->sv + rs.u.max / sizeof(uint64_t);
811 if (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)
812 return "undefined value on the stack";
816 /* pointer to mbuf */
817 } else if (rs.v.type == RTE_BPF_ARG_PTR_MBUF) {
819 if (rs.u.max == offsetof(struct rte_mbuf, next)) {
820 eval_fill_imm(rd, msk, 0);
822 } else if (rs.u.max == offsetof(struct rte_mbuf, buf_addr)) {
823 eval_fill_imm(rd, msk, 0);
824 rd->v.type = RTE_BPF_ARG_PTR;
825 rd->v.size = rs.v.buf_size;
826 } else if (rs.u.max == offsetof(struct rte_mbuf, data_off)) {
827 eval_fill_imm(rd, msk, RTE_PKTMBUF_HEADROOM);
828 rd->v.type = RTE_BPF_ARG_RAW;
830 eval_max_load(rd, msk);
831 rd->v.type = RTE_BPF_ARG_RAW;
834 /* pointer to raw data */
836 eval_max_load(rd, msk);
837 rd->v.type = RTE_BPF_ARG_RAW;
844 eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)
848 static const struct {
851 } mbuf_ro_fileds[] = {
852 { .off = offsetof(struct rte_mbuf, buf_addr), },
853 { .off = offsetof(struct rte_mbuf, refcnt), },
854 { .off = offsetof(struct rte_mbuf, nb_segs), },
855 { .off = offsetof(struct rte_mbuf, buf_len), },
856 { .off = offsetof(struct rte_mbuf, pool), },
857 { .off = offsetof(struct rte_mbuf, next), },
858 { .off = offsetof(struct rte_mbuf, priv_size), },
861 for (i = 0; i != RTE_DIM(mbuf_ro_fileds) &&
862 (mbuf_ro_fileds[i].off + mbuf_ro_fileds[i].sz <=
863 rv->u.max || rv->u.max + opsz <= mbuf_ro_fileds[i].off);
867 if (i != RTE_DIM(mbuf_ro_fileds))
868 return "store to the read-only mbuf field";
875 eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
880 struct bpf_eval_state *st;
881 struct bpf_reg_val rd, rs, *sv;
883 opsz = bpf_size(BPF_SIZE(ins->code));
884 msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
887 rd = st->rv[ins->dst_reg];
889 if (BPF_CLASS(ins->code) == BPF_STX) {
890 rs = st->rv[ins->src_reg];
891 eval_apply_mask(&rs, msk);
893 eval_fill_imm(&rs, msk, ins->imm);
895 err = eval_defined(NULL, &rs);
899 err = eval_ptr(bvf, &rd, opsz, 1, ins->off);
903 if (rd.v.type == BPF_ARG_PTR_STACK) {
905 sv = st->sv + rd.u.max / sizeof(uint64_t);
906 if (BPF_CLASS(ins->code) == BPF_STX &&
907 BPF_MODE(ins->code) == EBPF_XADD)
908 eval_max_bound(sv, msk);
912 /* pointer to mbuf */
913 } else if (rd.v.type == RTE_BPF_ARG_PTR_MBUF) {
914 err = eval_mbuf_store(&rd, opsz);
923 eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
924 struct bpf_reg_val *rv)
927 struct bpf_eval_state *st;
932 if (rv->v.type == RTE_BPF_ARG_UNDEF)
933 return "Undefined argument type";
935 if (arg->type != rv->v.type &&
936 arg->type != RTE_BPF_ARG_RAW &&
937 (arg->type != RTE_BPF_ARG_PTR ||
938 RTE_BPF_ARG_PTR_TYPE(rv->v.type) == 0))
939 return "Invalid argument type";
943 /* argument is a pointer */
944 if (RTE_BPF_ARG_PTR_TYPE(arg->type) != 0) {
946 err = eval_ptr(bvf, rv, arg->size, 1, 0);
949 * pointer to the variable on the stack is passed
950 * as an argument, mark stack space it occupies as initialized.
952 if (err == NULL && rv->v.type == BPF_ARG_PTR_STACK) {
954 i = rv->u.max / sizeof(uint64_t);
955 n = i + arg->size / sizeof(uint64_t);
957 eval_fill_max_bound(st->sv + i, UINT64_MAX);
967 eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
970 struct bpf_reg_val *rv;
971 const struct rte_bpf_xsym *xsym;
976 if (idx >= bvf->prm->nb_xsym ||
977 bvf->prm->xsym[idx].type != RTE_BPF_XTYPE_FUNC)
978 return "invalid external function index";
980 /* for now don't support function calls on 32 bit platform */
981 if (sizeof(uint64_t) != sizeof(uintptr_t))
982 return "function calls are supported only for 64 bit apps";
984 xsym = bvf->prm->xsym + idx;
986 /* evaluate function arguments */
988 for (i = 0; i != xsym->func.nb_args && err == NULL; i++) {
989 err = eval_func_arg(bvf, xsym->func.args + i,
990 bvf->evst->rv + EBPF_REG_1 + i);
993 /* R1-R5 argument/scratch registers */
994 for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
995 bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
997 /* update return value */
999 rv = bvf->evst->rv + EBPF_REG_0;
1000 rv->v = xsym->func.ret;
1001 if (rv->v.type == RTE_BPF_ARG_RAW)
1002 eval_fill_max_bound(rv,
1003 RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
1004 else if (RTE_BPF_ARG_PTR_TYPE(rv->v.type) != 0)
1005 eval_fill_imm64(rv, UINTPTR_MAX, 0);
1011 eval_jeq_jne(struct bpf_reg_val *trd, struct bpf_reg_val *trs)
1013 /* sreg is constant */
1014 if (trs->u.min == trs->u.max) {
1016 /* dreg is constant */
1017 } else if (trd->u.min == trd->u.max) {
1020 trd->u.max = RTE_MIN(trd->u.max, trs->u.max);
1021 trd->u.min = RTE_MAX(trd->u.min, trs->u.min);
1025 /* sreg is constant */
1026 if (trs->s.min == trs->s.max) {
1028 /* dreg is constant */
1029 } else if (trd->s.min == trd->s.max) {
1032 trd->s.max = RTE_MIN(trd->s.max, trs->s.max);
1033 trd->s.min = RTE_MAX(trd->s.min, trs->s.min);
1039 eval_jgt_jle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1040 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1042 frd->u.max = RTE_MIN(frd->u.max, frs->u.min);
1043 trd->u.min = RTE_MAX(trd->u.min, trs->u.min + 1);
1047 eval_jlt_jge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1048 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1050 frd->u.min = RTE_MAX(frd->u.min, frs->u.min);
1051 trd->u.max = RTE_MIN(trd->u.max, trs->u.max - 1);
1055 eval_jsgt_jsle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1056 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1058 frd->s.max = RTE_MIN(frd->s.max, frs->s.min);
1059 trd->s.min = RTE_MAX(trd->s.min, trs->s.min + 1);
1063 eval_jslt_jsge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1064 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1066 frd->s.min = RTE_MAX(frd->s.min, frs->s.min);
1067 trd->s.max = RTE_MIN(trd->s.max, trs->s.max - 1);
1071 eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
1075 struct bpf_eval_state *fst, *tst;
1076 struct bpf_reg_val *frd, *frs, *trd, *trs;
1077 struct bpf_reg_val rvf, rvt;
1080 fst = bvf->evin->evst;
1082 frd = fst->rv + ins->dst_reg;
1083 trd = tst->rv + ins->dst_reg;
1085 if (BPF_SRC(ins->code) == BPF_X) {
1086 frs = fst->rv + ins->src_reg;
1087 trs = tst->rv + ins->src_reg;
1091 eval_fill_imm(frs, UINT64_MAX, ins->imm);
1092 eval_fill_imm(trs, UINT64_MAX, ins->imm);
1095 err = eval_defined(trd, trs);
1099 op = BPF_OP(ins->code);
1102 eval_jeq_jne(trd, trs);
1103 else if (op == EBPF_JNE)
1104 eval_jeq_jne(frd, frs);
1105 else if (op == BPF_JGT)
1106 eval_jgt_jle(trd, trs, frd, frs);
1107 else if (op == EBPF_JLE)
1108 eval_jgt_jle(frd, frs, trd, trs);
1109 else if (op == EBPF_JLT)
1110 eval_jlt_jge(trd, trs, frd, frs);
1111 else if (op == BPF_JGE)
1112 eval_jlt_jge(frd, frs, trd, trs);
1113 else if (op == EBPF_JSGT)
1114 eval_jsgt_jsle(trd, trs, frd, frs);
1115 else if (op == EBPF_JSLE)
1116 eval_jsgt_jsle(frd, frs, trd, trs);
1117 else if (op == EBPF_JLT)
1118 eval_jslt_jsge(trd, trs, frd, frs);
1119 else if (op == EBPF_JSGE)
1120 eval_jslt_jsge(frd, frs, trd, trs);
1126 * validate parameters for each instruction type.
1128 static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
1129 /* ALU IMM 32-bit instructions */
1130 [(BPF_ALU | BPF_ADD | BPF_K)] = {
1131 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1132 .off = { .min = 0, .max = 0},
1133 .imm = { .min = 0, .max = UINT32_MAX,},
1136 [(BPF_ALU | BPF_SUB | BPF_K)] = {
1137 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1138 .off = { .min = 0, .max = 0},
1139 .imm = { .min = 0, .max = UINT32_MAX,},
1142 [(BPF_ALU | BPF_AND | BPF_K)] = {
1143 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1144 .off = { .min = 0, .max = 0},
1145 .imm = { .min = 0, .max = UINT32_MAX,},
1148 [(BPF_ALU | BPF_OR | BPF_K)] = {
1149 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1150 .off = { .min = 0, .max = 0},
1151 .imm = { .min = 0, .max = UINT32_MAX,},
1154 [(BPF_ALU | BPF_LSH | BPF_K)] = {
1155 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1156 .off = { .min = 0, .max = 0},
1157 .imm = { .min = 0, .max = UINT32_MAX,},
1160 [(BPF_ALU | BPF_RSH | BPF_K)] = {
1161 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1162 .off = { .min = 0, .max = 0},
1163 .imm = { .min = 0, .max = UINT32_MAX,},
1166 [(BPF_ALU | BPF_XOR | BPF_K)] = {
1167 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1168 .off = { .min = 0, .max = 0},
1169 .imm = { .min = 0, .max = UINT32_MAX,},
1172 [(BPF_ALU | BPF_MUL | BPF_K)] = {
1173 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1174 .off = { .min = 0, .max = 0},
1175 .imm = { .min = 0, .max = UINT32_MAX,},
1178 [(BPF_ALU | EBPF_MOV | BPF_K)] = {
1179 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1180 .off = { .min = 0, .max = 0},
1181 .imm = { .min = 0, .max = UINT32_MAX,},
1184 [(BPF_ALU | BPF_DIV | BPF_K)] = {
1185 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1186 .off = { .min = 0, .max = 0},
1187 .imm = { .min = 1, .max = UINT32_MAX},
1190 [(BPF_ALU | BPF_MOD | BPF_K)] = {
1191 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1192 .off = { .min = 0, .max = 0},
1193 .imm = { .min = 1, .max = UINT32_MAX},
1196 /* ALU IMM 64-bit instructions */
1197 [(EBPF_ALU64 | BPF_ADD | BPF_K)] = {
1198 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1199 .off = { .min = 0, .max = 0},
1200 .imm = { .min = 0, .max = UINT32_MAX,},
1203 [(EBPF_ALU64 | BPF_SUB | BPF_K)] = {
1204 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1205 .off = { .min = 0, .max = 0},
1206 .imm = { .min = 0, .max = UINT32_MAX,},
1209 [(EBPF_ALU64 | BPF_AND | BPF_K)] = {
1210 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1211 .off = { .min = 0, .max = 0},
1212 .imm = { .min = 0, .max = UINT32_MAX,},
1215 [(EBPF_ALU64 | BPF_OR | BPF_K)] = {
1216 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1217 .off = { .min = 0, .max = 0},
1218 .imm = { .min = 0, .max = UINT32_MAX,},
1221 [(EBPF_ALU64 | BPF_LSH | BPF_K)] = {
1222 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1223 .off = { .min = 0, .max = 0},
1224 .imm = { .min = 0, .max = UINT32_MAX,},
1227 [(EBPF_ALU64 | BPF_RSH | BPF_K)] = {
1228 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1229 .off = { .min = 0, .max = 0},
1230 .imm = { .min = 0, .max = UINT32_MAX,},
1233 [(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {
1234 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1235 .off = { .min = 0, .max = 0},
1236 .imm = { .min = 0, .max = UINT32_MAX,},
1239 [(EBPF_ALU64 | BPF_XOR | BPF_K)] = {
1240 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1241 .off = { .min = 0, .max = 0},
1242 .imm = { .min = 0, .max = UINT32_MAX,},
1245 [(EBPF_ALU64 | BPF_MUL | BPF_K)] = {
1246 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1247 .off = { .min = 0, .max = 0},
1248 .imm = { .min = 0, .max = UINT32_MAX,},
1251 [(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {
1252 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1253 .off = { .min = 0, .max = 0},
1254 .imm = { .min = 0, .max = UINT32_MAX,},
1257 [(EBPF_ALU64 | BPF_DIV | BPF_K)] = {
1258 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1259 .off = { .min = 0, .max = 0},
1260 .imm = { .min = 1, .max = UINT32_MAX},
1263 [(EBPF_ALU64 | BPF_MOD | BPF_K)] = {
1264 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1265 .off = { .min = 0, .max = 0},
1266 .imm = { .min = 1, .max = UINT32_MAX},
1269 /* ALU REG 32-bit instructions */
1270 [(BPF_ALU | BPF_ADD | BPF_X)] = {
1271 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1272 .off = { .min = 0, .max = 0},
1273 .imm = { .min = 0, .max = 0},
1276 [(BPF_ALU | BPF_SUB | BPF_X)] = {
1277 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1278 .off = { .min = 0, .max = 0},
1279 .imm = { .min = 0, .max = 0},
1282 [(BPF_ALU | BPF_AND | BPF_X)] = {
1283 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1284 .off = { .min = 0, .max = 0},
1285 .imm = { .min = 0, .max = 0},
1288 [(BPF_ALU | BPF_OR | BPF_X)] = {
1289 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1290 .off = { .min = 0, .max = 0},
1291 .imm = { .min = 0, .max = 0},
1294 [(BPF_ALU | BPF_LSH | BPF_X)] = {
1295 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1296 .off = { .min = 0, .max = 0},
1297 .imm = { .min = 0, .max = 0},
1300 [(BPF_ALU | BPF_RSH | BPF_X)] = {
1301 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1302 .off = { .min = 0, .max = 0},
1303 .imm = { .min = 0, .max = 0},
1306 [(BPF_ALU | BPF_XOR | BPF_X)] = {
1307 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1308 .off = { .min = 0, .max = 0},
1309 .imm = { .min = 0, .max = 0},
1312 [(BPF_ALU | BPF_MUL | BPF_X)] = {
1313 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1314 .off = { .min = 0, .max = 0},
1315 .imm = { .min = 0, .max = 0},
1318 [(BPF_ALU | BPF_DIV | BPF_X)] = {
1319 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1320 .off = { .min = 0, .max = 0},
1321 .imm = { .min = 0, .max = 0},
1324 [(BPF_ALU | BPF_MOD | BPF_X)] = {
1325 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1326 .off = { .min = 0, .max = 0},
1327 .imm = { .min = 0, .max = 0},
1330 [(BPF_ALU | EBPF_MOV | BPF_X)] = {
1331 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1332 .off = { .min = 0, .max = 0},
1333 .imm = { .min = 0, .max = 0},
1336 [(BPF_ALU | BPF_NEG)] = {
1337 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1338 .off = { .min = 0, .max = 0},
1339 .imm = { .min = 0, .max = 0},
1342 [(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {
1343 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1344 .off = { .min = 0, .max = 0},
1345 .imm = { .min = 16, .max = 64},
1346 .check = check_alu_bele,
1349 [(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {
1350 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1351 .off = { .min = 0, .max = 0},
1352 .imm = { .min = 16, .max = 64},
1353 .check = check_alu_bele,
1356 /* ALU REG 64-bit instructions */
1357 [(EBPF_ALU64 | BPF_ADD | BPF_X)] = {
1358 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1359 .off = { .min = 0, .max = 0},
1360 .imm = { .min = 0, .max = 0},
1363 [(EBPF_ALU64 | BPF_SUB | BPF_X)] = {
1364 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1365 .off = { .min = 0, .max = 0},
1366 .imm = { .min = 0, .max = 0},
1369 [(EBPF_ALU64 | BPF_AND | BPF_X)] = {
1370 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1371 .off = { .min = 0, .max = 0},
1372 .imm = { .min = 0, .max = 0},
1375 [(EBPF_ALU64 | BPF_OR | BPF_X)] = {
1376 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1377 .off = { .min = 0, .max = 0},
1378 .imm = { .min = 0, .max = 0},
1381 [(EBPF_ALU64 | BPF_LSH | BPF_X)] = {
1382 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1383 .off = { .min = 0, .max = 0},
1384 .imm = { .min = 0, .max = 0},
1387 [(EBPF_ALU64 | BPF_RSH | BPF_X)] = {
1388 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1389 .off = { .min = 0, .max = 0},
1390 .imm = { .min = 0, .max = 0},
1393 [(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {
1394 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1395 .off = { .min = 0, .max = 0},
1396 .imm = { .min = 0, .max = 0},
1399 [(EBPF_ALU64 | BPF_XOR | BPF_X)] = {
1400 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1401 .off = { .min = 0, .max = 0},
1402 .imm = { .min = 0, .max = 0},
1405 [(EBPF_ALU64 | BPF_MUL | BPF_X)] = {
1406 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1407 .off = { .min = 0, .max = 0},
1408 .imm = { .min = 0, .max = 0},
1411 [(EBPF_ALU64 | BPF_DIV | BPF_X)] = {
1412 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1413 .off = { .min = 0, .max = 0},
1414 .imm = { .min = 0, .max = 0},
1417 [(EBPF_ALU64 | BPF_MOD | BPF_X)] = {
1418 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1419 .off = { .min = 0, .max = 0},
1420 .imm = { .min = 0, .max = 0},
1423 [(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {
1424 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1425 .off = { .min = 0, .max = 0},
1426 .imm = { .min = 0, .max = 0},
1429 [(EBPF_ALU64 | BPF_NEG)] = {
1430 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1431 .off = { .min = 0, .max = 0},
1432 .imm = { .min = 0, .max = 0},
1435 /* load instructions */
1436 [(BPF_LDX | BPF_MEM | BPF_B)] = {
1437 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1438 .off = { .min = 0, .max = UINT16_MAX},
1439 .imm = { .min = 0, .max = 0},
1442 [(BPF_LDX | BPF_MEM | BPF_H)] = {
1443 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1444 .off = { .min = 0, .max = UINT16_MAX},
1445 .imm = { .min = 0, .max = 0},
1448 [(BPF_LDX | BPF_MEM | BPF_W)] = {
1449 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1450 .off = { .min = 0, .max = UINT16_MAX},
1451 .imm = { .min = 0, .max = 0},
1454 [(BPF_LDX | BPF_MEM | EBPF_DW)] = {
1455 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1456 .off = { .min = 0, .max = UINT16_MAX},
1457 .imm = { .min = 0, .max = 0},
1460 /* load 64 bit immediate value */
1461 [(BPF_LD | BPF_IMM | EBPF_DW)] = {
1462 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1463 .off = { .min = 0, .max = 0},
1464 .imm = { .min = 0, .max = UINT32_MAX},
1465 .eval = eval_ld_imm64,
1467 /* load absolute instructions */
1468 [(BPF_LD | BPF_ABS | BPF_B)] = {
1469 .mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
1470 .off = { .min = 0, .max = 0},
1471 .imm = { .min = 0, .max = INT32_MAX},
1472 .eval = eval_ld_mbuf,
1474 [(BPF_LD | BPF_ABS | BPF_H)] = {
1475 .mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
1476 .off = { .min = 0, .max = 0},
1477 .imm = { .min = 0, .max = INT32_MAX},
1478 .eval = eval_ld_mbuf,
1480 [(BPF_LD | BPF_ABS | BPF_W)] = {
1481 .mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
1482 .off = { .min = 0, .max = 0},
1483 .imm = { .min = 0, .max = INT32_MAX},
1484 .eval = eval_ld_mbuf,
1486 /* load indirect instructions */
1487 [(BPF_LD | BPF_IND | BPF_B)] = {
1488 .mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1489 .off = { .min = 0, .max = 0},
1490 .imm = { .min = 0, .max = UINT32_MAX},
1491 .eval = eval_ld_mbuf,
1493 [(BPF_LD | BPF_IND | BPF_H)] = {
1494 .mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1495 .off = { .min = 0, .max = 0},
1496 .imm = { .min = 0, .max = UINT32_MAX},
1497 .eval = eval_ld_mbuf,
1499 [(BPF_LD | BPF_IND | BPF_W)] = {
1500 .mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1501 .off = { .min = 0, .max = 0},
1502 .imm = { .min = 0, .max = UINT32_MAX},
1503 .eval = eval_ld_mbuf,
1505 /* store REG instructions */
1506 [(BPF_STX | BPF_MEM | BPF_B)] = {
1507 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1508 .off = { .min = 0, .max = UINT16_MAX},
1509 .imm = { .min = 0, .max = 0},
1512 [(BPF_STX | BPF_MEM | BPF_H)] = {
1513 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1514 .off = { .min = 0, .max = UINT16_MAX},
1515 .imm = { .min = 0, .max = 0},
1518 [(BPF_STX | BPF_MEM | BPF_W)] = {
1519 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1520 .off = { .min = 0, .max = UINT16_MAX},
1521 .imm = { .min = 0, .max = 0},
1524 [(BPF_STX | BPF_MEM | EBPF_DW)] = {
1525 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1526 .off = { .min = 0, .max = UINT16_MAX},
1527 .imm = { .min = 0, .max = 0},
1530 /* atomic add instructions */
1531 [(BPF_STX | EBPF_XADD | BPF_W)] = {
1532 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1533 .off = { .min = 0, .max = UINT16_MAX},
1534 .imm = { .min = 0, .max = 0},
1537 [(BPF_STX | EBPF_XADD | EBPF_DW)] = {
1538 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1539 .off = { .min = 0, .max = UINT16_MAX},
1540 .imm = { .min = 0, .max = 0},
1543 /* store IMM instructions */
1544 [(BPF_ST | BPF_MEM | BPF_B)] = {
1545 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1546 .off = { .min = 0, .max = UINT16_MAX},
1547 .imm = { .min = 0, .max = UINT32_MAX},
1550 [(BPF_ST | BPF_MEM | BPF_H)] = {
1551 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1552 .off = { .min = 0, .max = UINT16_MAX},
1553 .imm = { .min = 0, .max = UINT32_MAX},
1556 [(BPF_ST | BPF_MEM | BPF_W)] = {
1557 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1558 .off = { .min = 0, .max = UINT16_MAX},
1559 .imm = { .min = 0, .max = UINT32_MAX},
1562 [(BPF_ST | BPF_MEM | EBPF_DW)] = {
1563 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1564 .off = { .min = 0, .max = UINT16_MAX},
1565 .imm = { .min = 0, .max = UINT32_MAX},
1568 /* jump instruction */
1569 [(BPF_JMP | BPF_JA)] = {
1570 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1571 .off = { .min = 0, .max = UINT16_MAX},
1572 .imm = { .min = 0, .max = 0},
1574 /* jcc IMM instructions */
1575 [(BPF_JMP | BPF_JEQ | BPF_K)] = {
1576 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1577 .off = { .min = 0, .max = UINT16_MAX},
1578 .imm = { .min = 0, .max = UINT32_MAX},
1581 [(BPF_JMP | EBPF_JNE | BPF_K)] = {
1582 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1583 .off = { .min = 0, .max = UINT16_MAX},
1584 .imm = { .min = 0, .max = UINT32_MAX},
1587 [(BPF_JMP | BPF_JGT | BPF_K)] = {
1588 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1589 .off = { .min = 0, .max = UINT16_MAX},
1590 .imm = { .min = 0, .max = UINT32_MAX},
1593 [(BPF_JMP | EBPF_JLT | BPF_K)] = {
1594 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1595 .off = { .min = 0, .max = UINT16_MAX},
1596 .imm = { .min = 0, .max = UINT32_MAX},
1599 [(BPF_JMP | BPF_JGE | BPF_K)] = {
1600 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1601 .off = { .min = 0, .max = UINT16_MAX},
1602 .imm = { .min = 0, .max = UINT32_MAX},
1605 [(BPF_JMP | EBPF_JLE | BPF_K)] = {
1606 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1607 .off = { .min = 0, .max = UINT16_MAX},
1608 .imm = { .min = 0, .max = UINT32_MAX},
1611 [(BPF_JMP | EBPF_JSGT | BPF_K)] = {
1612 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1613 .off = { .min = 0, .max = UINT16_MAX},
1614 .imm = { .min = 0, .max = UINT32_MAX},
1617 [(BPF_JMP | EBPF_JSLT | BPF_K)] = {
1618 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1619 .off = { .min = 0, .max = UINT16_MAX},
1620 .imm = { .min = 0, .max = UINT32_MAX},
1623 [(BPF_JMP | EBPF_JSGE | BPF_K)] = {
1624 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1625 .off = { .min = 0, .max = UINT16_MAX},
1626 .imm = { .min = 0, .max = UINT32_MAX},
1629 [(BPF_JMP | EBPF_JSLE | BPF_K)] = {
1630 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1631 .off = { .min = 0, .max = UINT16_MAX},
1632 .imm = { .min = 0, .max = UINT32_MAX},
1635 [(BPF_JMP | BPF_JSET | BPF_K)] = {
1636 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1637 .off = { .min = 0, .max = UINT16_MAX},
1638 .imm = { .min = 0, .max = UINT32_MAX},
1641 /* jcc REG instructions */
1642 [(BPF_JMP | BPF_JEQ | BPF_X)] = {
1643 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1644 .off = { .min = 0, .max = UINT16_MAX},
1645 .imm = { .min = 0, .max = 0},
1648 [(BPF_JMP | EBPF_JNE | BPF_X)] = {
1649 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1650 .off = { .min = 0, .max = UINT16_MAX},
1651 .imm = { .min = 0, .max = 0},
1654 [(BPF_JMP | BPF_JGT | BPF_X)] = {
1655 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1656 .off = { .min = 0, .max = UINT16_MAX},
1657 .imm = { .min = 0, .max = 0},
1660 [(BPF_JMP | EBPF_JLT | BPF_X)] = {
1661 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1662 .off = { .min = 0, .max = UINT16_MAX},
1663 .imm = { .min = 0, .max = 0},
1666 [(BPF_JMP | BPF_JGE | BPF_X)] = {
1667 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1668 .off = { .min = 0, .max = UINT16_MAX},
1669 .imm = { .min = 0, .max = 0},
1672 [(BPF_JMP | EBPF_JLE | BPF_X)] = {
1673 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1674 .off = { .min = 0, .max = UINT16_MAX},
1675 .imm = { .min = 0, .max = 0},
1678 [(BPF_JMP | EBPF_JSGT | BPF_X)] = {
1679 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1680 .off = { .min = 0, .max = UINT16_MAX},
1681 .imm = { .min = 0, .max = 0},
1684 [(BPF_JMP | EBPF_JSLT | BPF_X)] = {
1685 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1686 .off = { .min = 0, .max = UINT16_MAX},
1687 .imm = { .min = 0, .max = 0},
1689 [(BPF_JMP | EBPF_JSGE | BPF_X)] = {
1690 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1691 .off = { .min = 0, .max = UINT16_MAX},
1692 .imm = { .min = 0, .max = 0},
1695 [(BPF_JMP | EBPF_JSLE | BPF_X)] = {
1696 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1697 .off = { .min = 0, .max = UINT16_MAX},
1698 .imm = { .min = 0, .max = 0},
1701 [(BPF_JMP | BPF_JSET | BPF_X)] = {
1702 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1703 .off = { .min = 0, .max = UINT16_MAX},
1704 .imm = { .min = 0, .max = 0},
1707 /* call instruction */
1708 [(BPF_JMP | EBPF_CALL)] = {
1709 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1710 .off = { .min = 0, .max = 0},
1711 .imm = { .min = 0, .max = UINT32_MAX},
1714 /* ret instruction */
1715 [(BPF_JMP | EBPF_EXIT)] = {
1716 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1717 .off = { .min = 0, .max = 0},
1718 .imm = { .min = 0, .max = 0},
1724 * make sure that instruction syntax is valid,
1725 * and its fields don't violate particular instruction type restrictions.
1728 check_syntax(const struct ebpf_insn *ins)
/*
 * Validate a single instruction against the per-opcode ins_chk[] table.
 * Returns NULL on success, or a static error string describing the first
 * violated constraint.
 * NOTE(review): interior lines (opening brace, op/off/imm locals) are
 * elided from this excerpt; presumably op = ins->code — confirm.
 */
/* an all-zero dreg mask marks an unused table slot, i.e. unknown opcode */
1737 if (ins_chk[op].mask.dreg == 0)
1738 return "invalid opcode";
/* dst/src register fields must be inside the per-opcode register masks */
1740 if ((ins_chk[op].mask.dreg & 1 << ins->dst_reg) == 0)
1741 return "invalid dst-reg field";
1743 if ((ins_chk[op].mask.sreg & 1 << ins->src_reg) == 0)
1744 return "invalid src-reg field";
/* offset and immediate must fall inside the per-opcode [min, max] ranges */
1747 if (ins_chk[op].off.min > off || ins_chk[op].off.max < off)
1748 return "invalid off field";
1751 if (ins_chk[op].imm.min > imm || ins_chk[op].imm.max < imm)
1752 return "invalid imm field";
/* run the opcode-specific extra check, if one is registered
 * (e.g. check_alu_bele restricting imm to 16/32/64) */
1754 if (ins_chk[op].check != NULL)
1755 return ins_chk[op].check(ins);
1761 * helper function, return instruction index for the given node.
1764 get_node_idx(const struct bpf_verifier *bvf, const struct inst_node *node)
/* node's index in bvf->in[] equals its instruction index (pc) */
1766 return node - bvf->in;
1770 * helper function, used to walk through constructed CFG.
1772 static struct inst_node *
1773 get_next_node(struct bpf_verifier *bvf, struct inst_node *node)
1775 uint32_t ce, ne, dst;
/* resume edge traversal from the node's saved cursor */
1778 ce = node->cur_edge;
/*
 * NOTE(review): lines between 1778 and 1783 are elided here; presumably
 * they bound-check ce against nb_edge (returning NULL when exhausted)
 * and advance cur_edge — confirm against the full source.
 */
/* follow the current edge to its destination instruction node */
1783 dst = node->edge_dest[ce];
1784 return bvf->in + dst;
1788 set_node_colour(struct bpf_verifier *bvf, struct inst_node *node,
/* recolour a node during DFS, keeping per-colour counters in sync */
1793 prev = node->colour;
/* move one node from the old colour bucket to the new one */
1796 bvf->node_colour[prev]--;
1797 bvf->node_colour[new]++;
1801 * helper function, add new edge between two nodes.
1804 add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
/*
 * Add a CFG edge from node to instruction index nidx.
 * Rejects targets past the end of the program and nodes that already
 * have the maximum number of outgoing edges.
 */
1808 if (nidx > bvf->prm->nb_ins) {
1809 RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, "
1811 __func__, get_node_idx(bvf, node), nidx);
/* edge_dest[] is fixed-size (MAX_EDGES); overflowing it is an internal error */
1816 if (ne >= RTE_DIM(node->edge_dest)) {
1817 RTE_BPF_LOG(ERR, "%s: internal error at pc: %u\n",
1818 __func__, get_node_idx(bvf, node));
/* append the new edge and bump the edge count */
1822 node->edge_dest[ne] = nidx;
1823 node->nb_edge = ne + 1;
1828 * helper function, determine type of edge between two nodes.
1831 set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
1832 const struct inst_node *next)
1834 uint32_t ce, clr, type;
/* classify the edge just traversed: cur_edge was already advanced */
1836 ce = node->cur_edge - 1;
1839 type = UNKNOWN_EDGE;
/*
 * Edge type follows the DFS colour of the destination:
 * GREY destination => back edge (a loop), BLACK => cross/forward edge.
 * NOTE(review): the assignment lines for each branch are elided from
 * this excerpt — confirm against the full source.
 */
1843 else if (clr == GREY)
1845 else if (clr == BLACK)
1847 * in fact it could be either direct or cross edge,
1848 * but for now, we don't need to distinguish between them.
/* record the type on the node and in the global per-type counters */
1852 node->edge_type[ce] = type;
1853 bvf->edge_type[type]++;
1856 static struct inst_node *
1857 get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
/* return the DFS parent recorded in prev_node (an index into bvf->in[]) */
1859 return bvf->in + node->prev_node;
1863 * Depth-First Search (DFS) through previously constructed
1864 * Control Flow Graph (CFG).
1865 * Information collected along this path is used later
1866 * to determine whether there are any loops and/or unreachable instructions.
1869 dfs(struct bpf_verifier *bvf)
1871 struct inst_node *next, *node;
/* iterative DFS: WHITE = unvisited, GREY = on stack, BLACK = done */
1874 while (node != NULL) {
1876 if (node->colour == WHITE)
1877 set_node_colour(bvf, node, GREY);
1879 if (node->colour == GREY) {
1881 /* find next unprocessed child node */
1883 next = get_next_node(bvf, node);
/* classify each traversed edge; skip children already visited */
1886 set_edge_type(bvf, node, next);
1887 } while (next->colour != WHITE);
1890 /* proceed with next child */
1891 next->prev_node = get_node_idx(bvf, node);
1895 * finished with current node and all its children,
1896 * proceed with parent
1898 set_node_colour(bvf, node, BLACK);
/* backtrack via the prev_node link set on the way down */
1900 node = get_prev_node(bvf, node);
1908 * report unreachable instructions.
1911 log_unreachable(const struct bpf_verifier *bvf)
1914 struct inst_node *node;
1915 const struct ebpf_insn *ins;
1917 for (i = 0; i != bvf->prm->nb_ins; i++) {
1920 ins = bvf->prm->ins + i;
/*
 * a node still WHITE after DFS was never reached; the second slot of a
 * two-slot 64-bit LD_IMM is legitimately unvisited, so it is exempt
 */
1922 if (node->colour == WHITE &&
1923 ins->code != (BPF_LD | BPF_IMM | EBPF_DW))
1924 RTE_BPF_LOG(ERR, "unreachable code at pc: %u;\n", i);
1929 * report loops detected.
1932 log_loop(const struct bpf_verifier *bvf)
1935 struct inst_node *node;
1937 for (i = 0; i != bvf->prm->nb_ins; i++) {
/* only fully-processed (BLACK) nodes have classified edges */
1940 if (node->colour != BLACK)
/* any back edge found during DFS indicates a loop in the program */
1943 for (j = 0; j != node->nb_edge; j++) {
1944 if (node->edge_type[j] == BACK_EDGE)
1946 "loop at pc:%u --> pc:%u;\n",
1947 i, node->edge_dest[j]);
1953 * First pass goes through all instructions in the set, checks that each
1954 * instruction is a valid one (correct syntax, valid field values, etc.)
1955 * and constructs control flow graph (CFG).
1956 * Then depth-first search is performed over the constructed graph.
1957 * Programs with unreachable instructions and/or loops will be rejected.
1960 validate(struct bpf_verifier *bvf)
1964 struct inst_node *node;
1965 const struct ebpf_insn *ins;
/* pass 1: per-instruction syntax check + CFG edge construction */
1969 for (i = 0; i < bvf->prm->nb_ins; i++) {
1971 ins = bvf->prm->ins + i;
1974 err = check_syntax(ins);
1976 RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
1982 * construct CFG, jcc nodes have two outgoing edges,
1983 * 'exit' nodes - none, all other nodes have exactly one
1986 switch (ins->code) {
1987 case (BPF_JMP | EBPF_EXIT):
1989 case (BPF_JMP | BPF_JEQ | BPF_K):
1990 case (BPF_JMP | EBPF_JNE | BPF_K):
1991 case (BPF_JMP | BPF_JGT | BPF_K):
1992 case (BPF_JMP | EBPF_JLT | BPF_K):
1993 case (BPF_JMP | BPF_JGE | BPF_K):
1994 case (BPF_JMP | EBPF_JLE | BPF_K):
1995 case (BPF_JMP | EBPF_JSGT | BPF_K):
1996 case (BPF_JMP | EBPF_JSLT | BPF_K):
1997 case (BPF_JMP | EBPF_JSGE | BPF_K):
1998 case (BPF_JMP | EBPF_JSLE | BPF_K):
1999 case (BPF_JMP | BPF_JSET | BPF_K):
2000 case (BPF_JMP | BPF_JEQ | BPF_X):
2001 case (BPF_JMP | EBPF_JNE | BPF_X):
2002 case (BPF_JMP | BPF_JGT | BPF_X):
2003 case (BPF_JMP | EBPF_JLT | BPF_X):
2004 case (BPF_JMP | BPF_JGE | BPF_X):
2005 case (BPF_JMP | EBPF_JLE | BPF_X):
2006 case (BPF_JMP | EBPF_JSGT | BPF_X):
2007 case (BPF_JMP | EBPF_JSLT | BPF_X):
2008 case (BPF_JMP | EBPF_JSGE | BPF_X):
2009 case (BPF_JMP | EBPF_JSLE | BPF_X):
2010 case (BPF_JMP | BPF_JSET | BPF_X):
/* conditional jump: branch-taken edge plus fall-through edge */
2011 rc |= add_edge(bvf, node, i + ins->off + 1);
2012 rc |= add_edge(bvf, node, i + 1);
2013 bvf->nb_jcc_nodes++;
2015 case (BPF_JMP | BPF_JA):
/* unconditional jump: single edge to the target */
2016 rc |= add_edge(bvf, node, i + ins->off + 1);
2018 /* load 64 bit immediate value */
2019 case (BPF_LD | BPF_IMM | EBPF_DW):
/* LD_IMM64 occupies two slots, so fall-through skips one instruction */
2020 rc |= add_edge(bvf, node, i + 2);
2024 rc |= add_edge(bvf, node, i + 1);
/* every node starts WHITE (unvisited) for the DFS pass below */
2029 bvf->node_colour[WHITE]++;
/*
 * NOTE(review): the ",," in the node_color format string below looks
 * like a typo in the log output; string left untouched here.
 */
2037 RTE_BPF_LOG(DEBUG, "%s(%p) stats:\n"
2039 "nb_jcc_nodes=%u;\n"
2040 "node_color={[WHITE]=%u, [GREY]=%u,, [BLACK]=%u};\n"
2041 "edge_type={[UNKNOWN]=%u, [TREE]=%u, [BACK]=%u, [CROSS]=%u};\n",
2045 bvf->node_colour[WHITE], bvf->node_colour[GREY],
2046 bvf->node_colour[BLACK],
2047 bvf->edge_type[UNKNOWN_EDGE], bvf->edge_type[TREE_EDGE],
2048 bvf->edge_type[BACK_EDGE], bvf->edge_type[CROSS_EDGE]);
/* not every node turned BLACK => some instructions were never reached */
2050 if (bvf->node_colour[BLACK] != bvf->nb_nodes) {
2051 RTE_BPF_LOG(ERR, "%s(%p) unreachable instructions;\n",
2053 log_unreachable(bvf);
/* leftover GREY/WHITE nodes or unclassified edges => DFS bookkeeping bug */
2057 if (bvf->node_colour[GREY] != 0 || bvf->node_colour[WHITE] != 0 ||
2058 bvf->edge_type[UNKNOWN_EDGE] != 0) {
2059 RTE_BPF_LOG(ERR, "%s(%p) DFS internal error;\n",
/* back edges mean loops, which are not allowed */
2064 if (bvf->edge_type[BACK_EDGE] != 0) {
2065 RTE_BPF_LOG(ERR, "%s(%p) loops detected;\n",
2075 * helper functions get/free eval states.
2077 static struct bpf_eval_state *
2078 pull_eval_state(struct bpf_verifier *bvf)
/* take the next free state from the pre-allocated pool (stack discipline) */
2082 n = bvf->evst_pool.cur;
/* pool exhausted: caller must handle the failure (presumably NULL return
 * on the elided line — confirm against the full source) */
2083 if (n == bvf->evst_pool.num)
2086 bvf->evst_pool.cur = n + 1;
2087 return bvf->evst_pool.ent + n;
2091 push_eval_state(struct bpf_verifier *bvf)
/* return the most recently pulled state to the pool (LIFO) */
2093 bvf->evst_pool.cur--;
2097 evst_pool_fini(struct bpf_verifier *bvf)
/* release the pool backing store and reset all pool bookkeeping */
2100 free(bvf->evst_pool.ent);
2101 memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool));
2105 evst_pool_init(struct bpf_verifier *bvf)
/*
 * One saved state is needed per conditional-jump node, plus one for the
 * verifier's current working state.
 */
2109 n = bvf->nb_jcc_nodes + 1;
2111 bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0]));
2112 if (bvf->evst_pool.ent == NULL)
2115 bvf->evst_pool.num = n;
2116 bvf->evst_pool.cur = 0;
/* grab the first slot as the initial working state */
2118 bvf->evst = pull_eval_state(bvf);
2123 * Save current eval state.
2126 save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2128 struct bpf_eval_state *st;
2130 /* get new eval_state for this node */
2131 st = pull_eval_state(bvf);
2134 "%s: internal error (out of space) at pc: %u\n",
2135 __func__, get_node_idx(bvf, node));
2139 /* make a copy of current state */
2140 memcpy(st, bvf->evst, sizeof(*st));
2142 /* swap current state with new one */
/* the node keeps the snapshot; the copy becomes the working state
 * (presumably bvf->evst = st on the elided line — confirm) */
2143 node->evst = bvf->evst;
2146 RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
2147 __func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);
2153 * Restore previous eval state and mark current eval state as free.
2156 restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2158 RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
2159 __func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);
/* switch back to the snapshot taken at this node ... */
2161 bvf->evst = node->evst;
/* ... and hand the now-unused slot back to the pool */
2163 push_eval_state(bvf);
2167 log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
2168 uint32_t pc, int32_t loglvl)
/* debug helper: dump the tracked value ranges for the instruction's dst reg */
2170 const struct bpf_eval_state *st;
2171 const struct bpf_reg_val *rv;
2173 rte_log(loglvl, rte_bpf_logtype, "%s(pc=%u):\n", __func__, pc);
2176 rv = st->rv + ins->dst_reg;
2178 rte_log(loglvl, rte_bpf_logtype,
2180 "\tv={type=%u, size=%zu},\n"
2181 "\tmask=0x%" PRIx64 ",\n"
2182 "\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n"
2183 "\ts={min=%" PRId64 ", max=%" PRId64 "},\n"
2186 rv->v.type, rv->v.size,
2188 rv->u.min, rv->u.max,
2189 rv->s.min, rv->s.max);
2193 * Do second pass through CFG and try to evaluate instructions
2194 * via each possible path.
2195 * Right now evaluation functionality is quite limited.
2196 * Still need to add extra checks for:
2197 * - use/return uninitialized registers.
2198 * - use uninitialized data from the stack.
2199 * - memory boundaries violation.
2202 evaluate(struct bpf_verifier *bvf)
2207 const struct ebpf_insn *ins;
2208 struct inst_node *next, *node;
2210 /* initial state of frame pointer */
2211 static const struct bpf_reg_val rvfp = {
2213 .type = BPF_ARG_PTR_STACK,
2214 .size = MAX_BPF_STACK_SIZE,
2217 .u = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
2218 .s = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
/* R1 carries the program argument; RAW args get unconstrained bounds */
2221 bvf->evst->rv[EBPF_REG_1].v = bvf->prm->prog_arg;
2222 bvf->evst->rv[EBPF_REG_1].mask = UINT64_MAX;
2223 if (bvf->prm->prog_arg.type == RTE_BPF_ARG_RAW)
2224 eval_max_bound(bvf->evst->rv + EBPF_REG_1, UINT64_MAX);
/* R10 is the read-only frame pointer */
2226 bvf->evst->rv[EBPF_REG_10] = rvfp;
2228 ins = bvf->prm->ins;
/* walk the CFG in the same order as dfs(), evaluating along each path */
2233 while (node != NULL && rc == 0) {
2236 * current node evaluation, make sure we evaluate
2237 * each node only once.
2242 idx = get_node_idx(bvf, node);
2245 /* for jcc node make a copy of evaluation state */
2246 if (node->nb_edge > 1)
2247 rc |= save_eval_state(bvf, node);
/* run the per-opcode eval hook from ins_chk[], if any */
2249 if (ins_chk[op].eval != NULL && rc == 0) {
2250 err = ins_chk[op].eval(bvf, ins + idx);
2252 RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
2253 __func__, err, idx);
2258 log_eval_state(bvf, ins + idx, idx, RTE_LOG_DEBUG);
2262 /* proceed through CFG */
2263 next = get_next_node(bvf, node);
2266 /* proceed with next child */
/* all edges of a jcc node explored: drop back to its saved state */
2267 if (node->cur_edge == node->nb_edge &&
2269 restore_eval_state(bvf, node);
2271 next->prev_node = get_node_idx(bvf, node);
2275 * finished with current node and all it's kids,
2276 * proceed with parent
2279 node = get_prev_node(bvf, node);
/* back at the root node: traversal complete */
2282 if (node == bvf->in)
2291 bpf_validate(struct rte_bpf *bpf)
2294 struct bpf_verifier bvf;
2296 /* check input argument type, don't allow mbuf ptr on 32-bit */
2297 if (bpf->prm.prog_arg.type != RTE_BPF_ARG_RAW &&
2298 bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR &&
2299 (sizeof(uint64_t) != sizeof(uintptr_t) ||
2300 bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR_MBUF)) {
2301 RTE_BPF_LOG(ERR, "%s: unsupported argument type\n", __func__);
2305 memset(&bvf, 0, sizeof(bvf));
2306 bvf.prm = &bpf->prm;
2307 bvf.in = calloc(bpf->prm.nb_ins, sizeof(bvf.in[0]));
2311 rc = validate(&bvf);
2314 rc = evst_pool_init(&bvf);
2316 rc = evaluate(&bvf);
2317 evst_pool_fini(&bvf);
2322 /* copy collected info */
2324 bpf->stack_sz = bvf.stack_sz;