1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
12 #include <rte_common.h>
14 #include <rte_byteorder.h>
18 #define BPF_ARG_PTR_STACK RTE_BPF_ARG_RESERVED
33 struct bpf_eval_state {
34 struct bpf_reg_val rv[EBPF_REG_NUM];
35 struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
38 /* possible instruction node colour */
46 /* possible edge types */
61 uint8_t edge_type[MAX_EDGES];
62 uint32_t edge_dest[MAX_EDGES];
64 struct bpf_eval_state *evst;
68 const struct rte_bpf_prm *prm;
72 uint32_t nb_jcc_nodes;
73 uint32_t nb_ldmb_nodes;
74 uint32_t node_colour[MAX_NODE_COLOUR];
75 uint32_t edge_type[MAX_EDGE_TYPE];
76 struct bpf_eval_state *evst;
77 struct inst_node *evin;
81 struct bpf_eval_state *ent;
85 struct bpf_ins_check {
98 const char * (*check)(const struct ebpf_insn *);
99 const char * (*eval)(struct bpf_verifier *, const struct ebpf_insn *);
/* Register-usage masks: bit i set => register Ri is allowed in that field. */
102 #define ALL_REGS RTE_LEN2MASK(EBPF_REG_NUM, uint16_t)
/* writable registers: all registers below EBPF_REG_10 */
103 #define WRT_REGS RTE_LEN2MASK(EBPF_REG_10, uint16_t)
/* mask that matches only register 0 (field must be zero) */
104 #define ZERO_REG RTE_LEN2MASK(EBPF_REG_1, uint16_t)
106 /* For LD_IND R6 is an implicit CTX register. */
/* writable regs minus R6, since R6 carries the implicit mbuf/ctx pointer */
107 #define IND_SRC_REGS (WRT_REGS ^ 1 << EBPF_REG_6)
110 * check and evaluate functions for particular instruction types.
/*
 * NOTE(review): this file is a line-sampled dump — every line carries its
 * original line number and many lines are missing, so the bodies below are
 * incomplete fragments; reconcile with the original source before editing.
 */
/* Validate byte-swap (to_be/to_le) ALU instruction: imm must be 16/32/64. */
114 check_alu_bele(const struct ebpf_insn *ins)
116 if (ins->imm != 16 && ins->imm != 32 && ins->imm != 64)
117 return "invalid imm field";
/* EXIT instruction: R0 must hold a defined return value. */
122 eval_exit(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
125 if (bvf->evst->rv[EBPF_REG_0].v.type == RTE_BPF_ARG_UNDEF)
126 return "undefined return value";
130 /* setup max possible with this mask bounds */
/* widest possible unsigned range under 'mask' (body lines missing here) */
132 eval_umax_bound(struct bpf_reg_val *rv, uint64_t mask)
/* widest possible signed range under 'mask': [~(mask>>1), mask>>1] */
139 eval_smax_bound(struct bpf_reg_val *rv, uint64_t mask)
141 rv->s.max = mask >> 1;
142 rv->s.min = rv->s.max ^ UINT64_MAX;
/* widen both signed and unsigned ranges to the mask limits */
146 eval_max_bound(struct bpf_reg_val *rv, uint64_t mask)
148 eval_umax_bound(rv, mask);
149 eval_smax_bound(rv, mask);
/* as eval_max_bound, and mark the value as a plain (RAW) scalar */
153 eval_fill_max_bound(struct bpf_reg_val *rv, uint64_t mask)
155 eval_max_bound(rv, mask);
156 rv->v.type = RTE_BPF_ARG_RAW;
/* set rv to the exact 64-bit constant 'val' under 'mask' (body missing) */
161 eval_fill_imm64(struct bpf_reg_val *rv, uint64_t mask, uint64_t val)
/* set rv to a sign-extended 32-bit immediate, truncated by 'mask' */
171 eval_fill_imm(struct bpf_reg_val *rv, uint64_t mask, int32_t imm)
175 v = (uint64_t)imm & mask;
177 rv->v.type = RTE_BPF_ARG_RAW;
178 eval_fill_imm64(rv, mask, v);
/*
 * LD_IMM64 (two-slot instruction): assemble the 64-bit immediate from
 * ins[0].imm (low word) and ins[1].imm (high word) into dst_reg.
 * If the value matches a registered external variable's address, the
 * register inherits that variable's descriptor instead of a raw constant.
 */
182 eval_ld_imm64(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
186 struct bpf_reg_val *rd;
188 val = (uint32_t)ins[0].imm | (uint64_t)(uint32_t)ins[1].imm << 32;
190 rd = bvf->evst->rv + ins->dst_reg;
191 rd->v.type = RTE_BPF_ARG_RAW;
192 eval_fill_imm64(rd, UINT64_MAX, val);
194 for (i = 0; i != bvf->prm->nb_xsym; i++) {
196 /* load of external variable */
197 if (bvf->prm->xsym[i].type == RTE_BPF_XTYPE_VAR &&
198 (uintptr_t)bvf->prm->xsym[i].var.val == val) {
199 rd->v = bvf->prm->xsym[i].var.desc;
/* value becomes a pointer descriptor with zero offset, not a raw constant */
200 eval_fill_imm64(rd, UINT64_MAX, 0);
/*
 * Truncate tracked bounds of rv to 'mask' width. If masking changed the
 * unsigned bounds, the value wrapped: widen u.max conservatively and
 * clamp the signed range to the mask's representable extremes.
 */
209 eval_apply_mask(struct bpf_reg_val *rv, uint64_t mask)
211 struct bpf_reg_val rt;
213 rt.u.min = rv->u.min & mask;
214 rt.u.max = rv->u.max & mask;
215 if (rt.u.min != rv->u.min || rt.u.max != rv->u.max) {
216 rv->u.max = RTE_MAX(rt.u.max, mask);
/* lines setting rv->u.min are missing from this dump — verify upstream */
220 eval_smax_bound(&rt, mask);
221 rv->s.max = RTE_MIN(rt.s.max, rv->s.max);
222 rv->s.min = RTE_MAX(rt.s.min, rv->s.min);
/*
 * Bounds propagation for rd += rs (modular arithmetic under 'msk').
 * Constant +/- constant wraps deterministically, so overflow is only a
 * problem when at least one operand is a range — then fall back to the
 * widest bounds on any detected wrap-around.
 */
228 eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
230 struct bpf_reg_val rv;
232 rv.u.min = (rd->u.min + rs->u.min) & msk;
233 rv.u.max = (rd->u.max + rs->u.max) & msk;
234 rv.s.min = (rd->s.min + rs->s.min) & msk;
235 rv.s.max = (rd->s.max + rs->s.max) & msk;
238 * if at least one of the operands is not constant,
239 * then check for overflow
241 if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
242 (rv.u.min < rd->u.min || rv.u.max < rd->u.max))
243 eval_umax_bound(&rv, msk);
/* signed overflow: result moved the "wrong way" for the sign of rs */
245 if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
246 (((rs->s.min < 0 && rv.s.min > rd->s.min) ||
247 rv.s.min < rd->s.min) ||
248 ((rs->s.max < 0 && rv.s.max > rd->s.max) ||
249 rv.s.max < rd->s.max)))
250 eval_smax_bound(&rv, msk);
/*
 * Bounds propagation for rd -= rs; mirror of eval_add with min/max of rs
 * swapped (subtracting the source max gives the destination min).
 */
257 eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
259 struct bpf_reg_val rv;
261 rv.u.min = (rd->u.min - rs->u.max) & msk;
262 rv.u.max = (rd->u.max - rs->u.min) & msk;
263 rv.s.min = (rd->s.min - rs->s.max) & msk;
264 rv.s.max = (rd->s.max - rs->s.min) & msk;
267 * if at least one of the operands is not constant,
268 * then check for overflow
270 if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
271 (rv.u.min > rd->u.min || rv.u.max > rd->u.max))
272 eval_umax_bound(&rv, msk);
274 if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
275 (((rs->s.min < 0 && rv.s.min < rd->s.min) ||
276 rv.s.min > rd->s.min) ||
277 ((rs->s.max < 0 && rv.s.max < rd->s.max) ||
278 rv.s.max > rd->s.max)))
279 eval_smax_bound(&rv, msk);
/*
 * Bounds for rd <<= rs. A shift count >= operand width is undefined in C,
 * so any such possibility collapses the bounds to the widest range.
 */
286 eval_lsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
289 /* check if shift value is less then max result bits */
290 if (rs->u.max >= opsz) {
291 eval_max_bound(rd, msk);
295 /* check for overflow */
296 if (rd->u.max > RTE_LEN2MASK(opsz - rs->u.max, uint64_t))
297 eval_umax_bound(rd, msk);
299 rd->u.max <<= rs->u.max;
300 rd->u.min <<= rs->u.min;
303 /* check that dreg values are and would remain always positive */
304 if ((uint64_t)rd->s.min >> (opsz - 1) != 0 || rd->s.max >=
305 RTE_LEN2MASK(opsz - rs->u.max - 1, int64_t))
306 eval_smax_bound(rd, msk);
308 rd->s.max <<= rs->u.max;
309 rd->s.min <<= rs->u.min;
/*
 * Bounds for rd >>= rs (logical right shift): max shifts by the smallest
 * count, min by the largest. Signed bounds only survive if rd is provably
 * non-negative; otherwise widen.
 */
314 eval_rsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
317 /* check if shift value is less then max result bits */
318 if (rs->u.max >= opsz) {
319 eval_max_bound(rd, msk);
323 rd->u.max >>= rs->u.min;
324 rd->u.min >>= rs->u.max;
326 /* check that dreg values are always positive */
327 if ((uint64_t)rd->s.min >> (opsz - 1) != 0)
328 eval_smax_bound(rd, msk);
330 rd->s.max >>= rs->u.min;
331 rd->s.min >>= rs->u.max;
/*
 * Bounds for rd >>= rs (arithmetic right shift). 'shv' (declared in a
 * missing line) presumably pre-shifts 32-bit values into the top half so
 * the 64-bit arithmetic shift sign-extends correctly — TODO confirm.
 */
336 eval_arsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
341 /* check if shift value is less then max result bits */
342 if (rs->u.max >= opsz) {
343 eval_max_bound(rd, msk);
347 rd->u.max = (int64_t)rd->u.max >> rs->u.min;
348 rd->u.min = (int64_t)rd->u.min >> rs->u.max;
350 /* if we have 32-bit values - extend them to 64-bit */
351 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
/* NOTE(review): the branch conditions selecting min vs max are missing */
359 rd->s.min = (rd->s.min >> (rs->u.min + shv)) & msk;
361 rd->s.min = (rd->s.min >> (rs->u.max + shv)) & msk;
364 rd->s.max = (rd->s.max >> (rs->u.max + shv)) & msk;
366 rd->s.max = (rd->s.max >> (rs->u.min + shv)) & msk;
/* smallest all-ones mask that can hold v: 2^(bits used by v) - 1 */
370 eval_umax_bits(uint64_t v, size_t opsz)
375 v = __builtin_clzll(v);
376 return RTE_LEN2MASK(opsz - v, uint64_t);
379 /* estimate max possible value for (v1 & v2) */
381 eval_uand_max(uint64_t v1, uint64_t v2, size_t opsz)
383 v1 = eval_umax_bits(v1, opsz);
384 v2 = eval_umax_bits(v2, opsz);
/* (result combines v1/v2 — the combining line is missing from this dump) */
388 /* estimate max possible value for (v1 | v2) */
390 eval_uor_max(uint64_t v1, uint64_t v2, size_t opsz)
392 v1 = eval_umax_bits(v1, opsz);
393 v2 = eval_umax_bits(v2, opsz);
/*
 * Bounds for rd &= rs. Constants are computed exactly; otherwise the
 * unsigned max is bounded by the overlap of the operands' bit widths.
 */
398 eval_and(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
401 /* both operands are constants */
402 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
403 rd->u.min &= rs->u.min;
404 rd->u.max &= rs->u.max;
406 rd->u.max = eval_uand_max(rd->u.max, rs->u.max, opsz);
407 rd->u.min &= rs->u.min;
410 /* both operands are constants */
411 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
412 rd->s.min &= rs->s.min;
413 rd->s.max &= rs->s.max;
414 /* at least one of operand is non-negative */
415 } else if (rd->s.min >= 0 || rs->s.min >= 0) {
/* mask off the sign bit (msk >> 1) before estimating magnitude */
416 rd->s.max = eval_uand_max(rd->s.max & (msk >> 1),
417 rs->s.max & (msk >> 1), opsz);
418 rd->s.min &= rs->s.min;
420 eval_smax_bound(rd, msk);
/* Bounds for rd |= rs — same structure as eval_and, with OR semantics. */
424 eval_or(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
427 /* both operands are constants */
428 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
429 rd->u.min |= rs->u.min;
430 rd->u.max |= rs->u.max;
432 rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
433 rd->u.min |= rs->u.min;
436 /* both operands are constants */
437 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
438 rd->s.min |= rs->s.min;
439 rd->s.max |= rs->s.max;
441 /* both operands are non-negative */
/* NOTE(review): comment says "both" but the test uses || — confirm intent */
442 } else if (rd->s.min >= 0 || rs->s.min >= 0) {
443 rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
444 rd->s.min |= rs->s.min;
446 eval_smax_bound(rd, msk);
/* Bounds for rd ^= rs; XOR max is bounded like OR (union of bit widths). */
450 eval_xor(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
453 /* both operands are constants */
454 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
455 rd->u.min ^= rs->u.min;
456 rd->u.max ^= rs->u.max;
458 rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
462 /* both operands are constants */
463 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
464 rd->s.min ^= rs->s.min;
465 rd->s.max ^= rs->s.max;
467 /* both operands are non-negative */
468 } else if (rd->s.min >= 0 || rs->s.min >= 0) {
469 rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
472 eval_smax_bound(rd, msk);
476 eval_mul(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
479 /* both operands are constants */
480 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
481 rd->u.min = (rd->u.min * rs->u.min) & msk;
482 rd->u.max = (rd->u.max * rs->u.max) & msk;
483 /* check for overflow */
484 } else if (rd->u.max <= msk >> opsz / 2 && rs->u.max <= msk >> opsz) {
485 rd->u.max *= rs->u.max;
486 rd->u.min *= rd->u.min;
488 eval_umax_bound(rd, msk);
490 /* both operands are constants */
491 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
492 rd->s.min = (rd->s.min * rs->s.min) & msk;
493 rd->s.max = (rd->s.max * rs->s.max) & msk;
494 /* check that both operands are positive and no overflow */
495 } else if (rd->s.min >= 0 && rs->s.min >= 0) {
496 rd->s.max *= rs->s.max;
497 rd->s.min *= rd->s.min;
499 eval_smax_bound(rd, msk);
/*
 * Bounds for rd /= rs or rd %= rs ('op' is BPF_DIV or BPF_MOD).
 * Returns an error string on a provable division by zero (the guard
 * conditions themselves are missing from this dump — verify upstream).
 */
503 eval_divmod(uint32_t op, struct bpf_reg_val *rd, struct bpf_reg_val *rs,
504 size_t opsz, uint64_t msk)
506 /* both operands are constants */
507 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
509 return "division by 0";
511 rd->u.min /= rs->u.min;
512 rd->u.max /= rs->u.max;
514 rd->u.min %= rs->u.min;
515 rd->u.max %= rs->u.max;
/* non-constant MOD: result is strictly below the divisor's max */
519 rd->u.max = RTE_MIN(rd->u.max, rs->u.max - 1);
/* deliberate-looking no-op (DIV cannot enlarge u.max) — TODO confirm */
521 rd->u.max = rd->u.max;
525 /* if we have 32-bit values - extend them to 64-bit */
526 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
527 rd->s.min = (int32_t)rd->s.min;
528 rd->s.max = (int32_t)rd->s.max;
529 rs->s.min = (int32_t)rs->s.min;
530 rs->s.max = (int32_t)rs->s.max;
533 /* both operands are constants */
534 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
536 return "division by 0";
538 rd->s.min /= rs->s.min;
539 rd->s.max /= rs->s.max;
541 rd->s.min %= rs->s.min;
542 rd->s.max %= rs->s.max;
544 } else if (op == BPF_MOD) {
/* NOTE(review): s.min assigned twice back-to-back; looks like the first
 * should set s.max — confirm against the original source. */
545 rd->s.min = RTE_MAX(rd->s.max, 0);
546 rd->s.min = RTE_MIN(rd->s.min, 0);
548 eval_smax_bound(rd, msk);
/*
 * Bounds for rd = -rd. Negation swaps the role of min and max, so both
 * candidates are computed and re-ordered with RTE_MIN/RTE_MAX.
 */
557 eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
562 /* if we have 32-bit values - extend them to 64-bit */
563 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
564 rd->u.min = (int32_t)rd->u.min;
565 rd->u.max = (int32_t)rd->u.max;
568 ux = -(int64_t)rd->u.min & msk;
569 uy = -(int64_t)rd->u.max & msk;
571 rd->u.max = RTE_MAX(ux, uy);
572 rd->u.min = RTE_MIN(ux, uy);
574 /* if we have 32-bit values - extend them to 64-bit */
575 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
576 rd->s.min = (int32_t)rd->s.min;
577 rd->s.max = (int32_t)rd->s.max;
580 sx = -rd->s.min & msk;
581 sy = -rd->s.max & msk;
583 rd->s.max = RTE_MAX(sx, sy);
584 rd->s.min = RTE_MIN(sx, sy);
/*
 * Legacy LD_ABS/LD_IND packet loads: R6 must hold the mbuf pointer,
 * R1-R5 are clobbered, and R0 receives the fetched data (RAW, sized by
 * the instruction's BPF_SIZE field).
 */
588 eval_ld_mbuf(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
591 struct bpf_reg_val *rv, ri, rs;
593 mode = BPF_MODE(ins->code);
595 /* R6 is an implicit input that must contain pointer to mbuf */
596 if (bvf->evst->rv[EBPF_REG_6].v.type != RTE_BPF_ARG_PTR_MBUF)
597 return "invalid type for implicit ctx register";
599 if (mode == BPF_IND) {
600 rs = bvf->evst->rv[ins->src_reg];
601 if (rs.v.type != RTE_BPF_ARG_RAW)
602 return "unexpected type for src register";
/* effective offset = src_reg + imm; must stay within 32-bit mbuf range */
604 eval_fill_imm(&ri, UINT64_MAX, ins->imm);
605 eval_add(&rs, &ri, UINT64_MAX);
607 if (rs.s.max < 0 || rs.u.min > UINT32_MAX)
608 return "mbuf boundary violation";
611 /* R1-R5 scratch registers */
612 for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
613 bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
615 /* R0 is an implicit output, contains data fetched from the packet */
616 rv = bvf->evst->rv + EBPF_REG_0;
617 rv->v.size = bpf_size(BPF_SIZE(ins->code));
618 eval_fill_max_bound(rv, RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
624 * check that destination and source operand are in defined state.
/* either pointer may be NULL to skip that side of the check */
627 eval_defined(const struct bpf_reg_val *dst, const struct bpf_reg_val *src)
629 if (dst != NULL && dst->v.type == RTE_BPF_ARG_UNDEF)
630 return "dest reg value is undefined";
631 if (src != NULL && src->v.type == RTE_BPF_ARG_UNDEF)
632 return "src reg value is undefined";
/*
 * Evaluate one ALU/ALU64 instruction: derive the operand width from the
 * instruction class, fetch/mask the operands, then dispatch to the
 * per-op bound evaluators above. Returns NULL or an error string.
 */
637 eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
643 struct bpf_eval_state *st;
644 struct bpf_reg_val *rd, rs;
/* 32-bit ops for BPF_ALU, 64-bit for EBPF_ALU64 */
646 opsz = (BPF_CLASS(ins->code) == BPF_ALU) ?
647 sizeof(uint32_t) : sizeof(uint64_t);
648 opsz = opsz * CHAR_BIT;
649 msk = RTE_LEN2MASK(opsz, uint64_t);
/* (assignment of 'st' is missing from this dump — presumably bvf->evst) */
652 rd = st->rv + ins->dst_reg;
654 if (BPF_SRC(ins->code) == BPF_X) {
655 rs = st->rv[ins->src_reg];
656 eval_apply_mask(&rs, msk);
658 eval_fill_imm(&rs, msk, ins->imm);
660 eval_apply_mask(rd, msk);
662 op = BPF_OP(ins->code);
664 /* Allow self-xor as way to zero register */
/* r ^= r is a well-defined way to produce 0 even from an undefined reg */
665 if (op == BPF_XOR && BPF_SRC(ins->code) == BPF_X &&
666 ins->src_reg == ins->dst_reg) {
667 eval_fill_imm(&rs, UINT64_MAX, 0);
668 eval_fill_imm(rd, UINT64_MAX, 0);
/* MOV ignores old dst; NEG has no src — skip those definedness checks */
671 err = eval_defined((op != EBPF_MOV) ? rd : NULL,
672 (op != BPF_NEG) ? &rs : NULL);
677 eval_add(rd, &rs, msk);
678 else if (op == BPF_SUB)
679 eval_sub(rd, &rs, msk);
680 else if (op == BPF_LSH)
681 eval_lsh(rd, &rs, opsz, msk);
682 else if (op == BPF_RSH)
683 eval_rsh(rd, &rs, opsz, msk);
684 else if (op == EBPF_ARSH)
685 eval_arsh(rd, &rs, opsz, msk);
686 else if (op == BPF_AND)
687 eval_and(rd, &rs, opsz, msk);
688 else if (op == BPF_OR)
689 eval_or(rd, &rs, opsz, msk);
690 else if (op == BPF_XOR)
691 eval_xor(rd, &rs, opsz, msk);
692 else if (op == BPF_MUL)
693 eval_mul(rd, &rs, opsz, msk);
694 else if (op == BPF_DIV || op == BPF_MOD)
695 err = eval_divmod(op, rd, &rs, opsz, msk);
696 else if (op == BPF_NEG)
697 eval_neg(rd, opsz, msk);
698 else if (op == EBPF_MOV)
/* fallthrough default: unknown result, widen to full range */
701 eval_max_bound(rd, msk);
/*
 * Evaluate byte-order conversion (to_be/to_le) on dst_reg; imm gives the
 * width (16/32/64). On this host's endianness a real swap scrambles the
 * bounds (widen them); a no-op conversion just truncates to the width.
 */
707 eval_bele(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
710 struct bpf_eval_state *st;
711 struct bpf_reg_val *rd;
714 msk = RTE_LEN2MASK(ins->imm, uint64_t);
717 rd = st->rv + ins->dst_reg;
719 err = eval_defined(rd, NULL);
723 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
724 if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_BE))
725 eval_max_bound(rd, msk);
727 eval_apply_mask(rd, msk);
729 if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_LE))
730 eval_max_bound(rd, msk);
732 eval_apply_mask(rd, msk);
/*
 * Validate a pointer access of 'opsz' bytes at reg+off with the given
 * alignment: the register must be a pointer, untruncated, in-bounds and
 * aligned. Stack/mbuf accesses must additionally have a constant offset.
 */
739 eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,
740 uint32_t align, int16_t off)
742 struct bpf_reg_val rv;
744 /* calculate reg + offset */
745 eval_fill_imm(&rv, rm->mask, off);
746 eval_add(rm, &rv, rm->mask);
748 if (RTE_BPF_ARG_PTR_TYPE(rm->v.type) == 0)
749 return "destination is not a pointer";
751 if (rm->mask != UINT64_MAX)
752 return "pointer truncation";
754 if (rm->u.max + opsz > rm->v.size ||
755 (uint64_t)rm->s.max + opsz > rm->v.size ||
757 return "memory boundary violation";
759 if (rm->u.max % align != 0)
760 return "unaligned memory access";
762 if (rm->v.type == BPF_ARG_PTR_STACK) {
/* offset must be a single known constant, identical signed/unsigned */
764 if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
765 rm->u.max != (uint64_t)rm->s.max)
766 return "stack access with variable offset";
/* track the deepest stack slot touched so far */
768 bvf->stack_sz = RTE_MAX(bvf->stack_sz, rm->v.size - rm->u.max);
770 /* pointer to mbuf */
771 } else if (rm->v.type == RTE_BPF_ARG_PTR_MBUF) {
773 if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
774 rm->u.max != (uint64_t)rm->s.max)
775 return "mbuf access with variable offset";
/*
 * Widen rv for a load of 'mask' width: full 64-bit loads can be any
 * signed value; narrower loads are zero-extended, hence non-negative.
 */
782 eval_max_load(struct bpf_reg_val *rv, uint64_t mask)
784 eval_umax_bound(rv, mask);
786 /* full 64-bit load */
787 if (mask == UINT64_MAX)
788 eval_smax_bound(rv, mask);
790 /* zero-extend load */
791 rv->s.min = rv->u.min;
792 rv->s.max = rv->u.max;
/*
 * Evaluate LDX: validate the source pointer, then type the destination
 * register from what is being read (stack slot, known mbuf field, or
 * raw data).
 */
797 eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
802 struct bpf_eval_state *st;
803 struct bpf_reg_val *rd, rs;
804 const struct bpf_reg_val *sv;
807 rd = st->rv + ins->dst_reg;
808 rs = st->rv[ins->src_reg];
809 opsz = bpf_size(BPF_SIZE(ins->code));
810 msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
812 err = eval_ptr(bvf, &rs, opsz, 1, ins->off);
816 if (rs.v.type == BPF_ARG_PTR_STACK) {
/* loads from the stack must read a previously-initialized slot */
818 sv = st->sv + rs.u.max / sizeof(uint64_t);
819 if (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)
820 return "undefined value on the stack";
824 /* pointer to mbuf */
825 } else if (rs.v.type == RTE_BPF_ARG_PTR_MBUF) {
/* mbuf->next is modelled as NULL (constant 0) for the program */
827 if (rs.u.max == offsetof(struct rte_mbuf, next)) {
828 eval_fill_imm(rd, msk, 0);
830 } else if (rs.u.max == offsetof(struct rte_mbuf, buf_addr)) {
831 eval_fill_imm(rd, msk, 0);
832 rd->v.type = RTE_BPF_ARG_PTR;
833 rd->v.size = rs.v.buf_size;
834 } else if (rs.u.max == offsetof(struct rte_mbuf, data_off)) {
835 eval_fill_imm(rd, msk, RTE_PKTMBUF_HEADROOM);
836 rd->v.type = RTE_BPF_ARG_RAW;
838 eval_max_load(rd, msk);
839 rd->v.type = RTE_BPF_ARG_RAW;
842 /* pointer to raw data */
844 eval_max_load(rd, msk);
845 rd->v.type = RTE_BPF_ARG_RAW;
/*
 * Reject stores that overlap any read-only mbuf field; the table lists
 * the protected offsets (each entry's .sz field comes from missing
 * lines in this dump). NOTE: "fileds" is a pre-existing typo kept as-is.
 */
852 eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)
856 static const struct {
859 } mbuf_ro_fileds[] = {
860 { .off = offsetof(struct rte_mbuf, buf_addr), },
861 { .off = offsetof(struct rte_mbuf, refcnt), },
862 { .off = offsetof(struct rte_mbuf, nb_segs), },
863 { .off = offsetof(struct rte_mbuf, buf_len), },
864 { .off = offsetof(struct rte_mbuf, pool), },
865 { .off = offsetof(struct rte_mbuf, next), },
866 { .off = offsetof(struct rte_mbuf, priv_size), },
/* scan until overlap: loop exits early when [u.max, u.max+opsz) hits a field */
869 for (i = 0; i != RTE_DIM(mbuf_ro_fileds) &&
870 (mbuf_ro_fileds[i].off + mbuf_ro_fileds[i].sz <=
871 rv->u.max || rv->u.max + opsz <= mbuf_ro_fileds[i].off);
875 if (i != RTE_DIM(mbuf_ro_fileds))
876 return "store to the read-only mbuf field";
/*
 * Evaluate ST/STX: validate the destination pointer, the value being
 * stored, and record the stored value's bounds into tracked stack slots.
 */
883 eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
888 struct bpf_eval_state *st;
889 struct bpf_reg_val rd, rs, *sv;
891 opsz = bpf_size(BPF_SIZE(ins->code));
892 msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
895 rd = st->rv[ins->dst_reg];
897 if (BPF_CLASS(ins->code) == BPF_STX) {
898 rs = st->rv[ins->src_reg];
899 eval_apply_mask(&rs, msk);
901 eval_fill_imm(&rs, msk, ins->imm);
903 err = eval_defined(NULL, &rs);
907 err = eval_ptr(bvf, &rd, opsz, 1, ins->off);
911 if (rd.v.type == BPF_ARG_PTR_STACK) {
913 sv = st->sv + rd.u.max / sizeof(uint64_t);
/* atomic add (XADD): resulting slot value is unknown, widen its bounds */
914 if (BPF_CLASS(ins->code) == BPF_STX &&
915 BPF_MODE(ins->code) == EBPF_XADD)
916 eval_max_bound(sv, msk);
920 /* pointer to mbuf */
921 } else if (rd.v.type == RTE_BPF_ARG_PTR_MBUF) {
922 err = eval_mbuf_store(&rd, opsz);
/*
 * Validate one argument register against the external function's
 * declared argument descriptor; pointer args are bounds-checked and a
 * stack-pointer arg marks its slots as initialized (callee may write).
 */
931 eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
932 struct bpf_reg_val *rv)
935 struct bpf_eval_state *st;
940 if (rv->v.type == RTE_BPF_ARG_UNDEF)
941 return "Undefined argument type";
/* RAW accepts anything; generic PTR accepts any pointer subtype */
943 if (arg->type != rv->v.type &&
944 arg->type != RTE_BPF_ARG_RAW &&
945 (arg->type != RTE_BPF_ARG_PTR ||
946 RTE_BPF_ARG_PTR_TYPE(rv->v.type) == 0))
947 return "Invalid argument type";
951 /* argument is a pointer */
952 if (RTE_BPF_ARG_PTR_TYPE(arg->type) != 0) {
954 err = eval_ptr(bvf, rv, arg->size, 1, 0);
957 * pointer to the variable on the stack is passed
958 * as an argument, mark stack space it occupies as initialized.
960 if (err == NULL && rv->v.type == BPF_ARG_PTR_STACK) {
962 i = rv->u.max / sizeof(uint64_t);
963 n = i + arg->size / sizeof(uint64_t);
/* (loop over slots [i, n) is missing from this dump) */
965 eval_fill_max_bound(st->sv + i, UINT64_MAX);
/*
 * Evaluate CALL to an external symbol: check the index, validate each
 * argument, clobber R1-R5 and set R0 from the declared return descriptor.
 */
975 eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
978 struct bpf_reg_val *rv;
979 const struct rte_bpf_xsym *xsym;
984 if (idx >= bvf->prm->nb_xsym ||
985 bvf->prm->xsym[idx].type != RTE_BPF_XTYPE_FUNC)
986 return "invalid external function index";
988 /* for now don't support function calls on 32 bit platform */
989 if (sizeof(uint64_t) != sizeof(uintptr_t))
990 return "function calls are supported only for 64 bit apps";
992 xsym = bvf->prm->xsym + idx;
994 /* evaluate function arguments */
996 for (i = 0; i != xsym->func.nb_args && err == NULL; i++) {
997 err = eval_func_arg(bvf, xsym->func.args + i,
998 bvf->evst->rv + EBPF_REG_1 + i);
1001 /* R1-R5 argument/scratch registers */
1002 for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
1003 bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
1005 /* update return value */
1007 rv = bvf->evst->rv + EBPF_REG_0;
1008 rv->v = xsym->func.ret;
1009 if (rv->v.type == RTE_BPF_ARG_RAW)
1010 eval_fill_max_bound(rv,
1011 RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
1012 else if (RTE_BPF_ARG_PTR_TYPE(rv->v.type) != 0)
1013 eval_fill_imm64(rv, UINTPTR_MAX, 0);
/*
 * Conditional-jump bound refiners. 'trd'/'trs' are the dst/src register
 * states on the branch-TAKEN path, 'frd'/'frs' on the fall-through path;
 * each helper tightens the appropriate side's bounds.
 */
/* JEQ taken (or JNE not taken): values are equal, intersect the ranges */
1019 eval_jeq_jne(struct bpf_reg_val *trd, struct bpf_reg_val *trs)
1021 /* sreg is constant */
1022 if (trs->u.min == trs->u.max) {
1024 /* dreg is constant */
1025 } else if (trd->u.min == trd->u.max) {
1028 trd->u.max = RTE_MIN(trd->u.max, trs->u.max);
1029 trd->u.min = RTE_MAX(trd->u.min, trs->u.min);
1033 /* sreg is constant */
1034 if (trs->s.min == trs->s.max) {
1036 /* dreg is constant */
1037 } else if (trd->s.min == trd->s.max) {
1040 trd->s.max = RTE_MIN(trd->s.max, trs->s.max);
1041 trd->s.min = RTE_MAX(trd->s.min, trs->s.min);
/* JGT taken: dst > src (unsigned); not taken: dst <= src */
1047 eval_jgt_jle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1048 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
/* NOTE(review): false-path clamp uses frs->u.min; dst <= src would
 * normally bound by frs->u.max — confirm against the original source. */
1050 frd->u.max = RTE_MIN(frd->u.max, frs->u.min);
1051 trd->u.min = RTE_MAX(trd->u.min, trs->u.min + 1);
/* JLT taken: dst < src (unsigned); not taken: dst >= src */
1055 eval_jlt_jge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1056 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1058 frd->u.min = RTE_MAX(frd->u.min, frs->u.min);
1059 trd->u.max = RTE_MIN(trd->u.max, trs->u.max - 1);
/* JSGT taken: dst > src (signed); not taken: dst <= src */
1063 eval_jsgt_jsle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1064 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1066 frd->s.max = RTE_MIN(frd->s.max, frs->s.min);
1067 trd->s.min = RTE_MAX(trd->s.min, trs->s.min + 1);
/* JSLT taken: dst < src (signed); not taken: dst >= src */
1071 eval_jslt_jsge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1072 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1074 frd->s.min = RTE_MAX(frd->s.min, frs->s.min);
1075 trd->s.max = RTE_MIN(trd->s.max, trs->s.max - 1);
/*
 * Evaluate a conditional jump: split the state into fall-through (fst)
 * and branch-taken (tst) copies, then dispatch to the bound refiner for
 * the specific comparison op. Inverse ops reuse a helper with the
 * true/false argument pairs swapped.
 */
1079 eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
1083 struct bpf_eval_state *fst, *tst;
1084 struct bpf_reg_val *frd, *frs, *trd, *trs;
1085 struct bpf_reg_val rvf, rvt;
/* (assignment of 'tst' is missing from this dump — verify upstream) */
1088 fst = bvf->evin->evst;
1090 frd = fst->rv + ins->dst_reg;
1091 trd = tst->rv + ins->dst_reg;
1093 if (BPF_SRC(ins->code) == BPF_X) {
1094 frs = fst->rv + ins->src_reg;
1095 trs = tst->rv + ins->src_reg;
/* immediate form: synthesize constant src operands (frs/trs point at
 * rvf/rvt — the assignments are in missing lines) */
1099 eval_fill_imm(frs, UINT64_MAX, ins->imm);
1100 eval_fill_imm(trs, UINT64_MAX, ins->imm);
1103 err = eval_defined(trd, trs);
1107 op = BPF_OP(ins->code);
1110 eval_jeq_jne(trd, trs);
1111 else if (op == EBPF_JNE)
1112 eval_jeq_jne(frd, frs);
1113 else if (op == BPF_JGT)
1114 eval_jgt_jle(trd, trs, frd, frs);
1115 else if (op == EBPF_JLE)
1116 eval_jgt_jle(frd, frs, trd, trs);
1117 else if (op == EBPF_JLT)
1118 eval_jlt_jge(trd, trs, frd, frs);
1119 else if (op == BPF_JGE)
1120 eval_jlt_jge(frd, frs, trd, trs);
1121 else if (op == EBPF_JSGT)
1122 eval_jsgt_jsle(trd, trs, frd, frs);
1123 else if (op == EBPF_JSLE)
1124 eval_jsgt_jsle(frd, frs, trd, trs);
1125 else if (op == EBPF_JSLT)
1126 eval_jslt_jsge(trd, trs, frd, frs);
1127 else if (op == EBPF_JSGE)
1128 eval_jslt_jsge(frd, frs, trd, trs);
1134 * validate parameters for each instruction type.
1136 static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
1137 /* ALU IMM 32-bit instructions */
1138 [(BPF_ALU | BPF_ADD | BPF_K)] = {
1139 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1140 .off = { .min = 0, .max = 0},
1141 .imm = { .min = 0, .max = UINT32_MAX,},
1144 [(BPF_ALU | BPF_SUB | BPF_K)] = {
1145 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1146 .off = { .min = 0, .max = 0},
1147 .imm = { .min = 0, .max = UINT32_MAX,},
1150 [(BPF_ALU | BPF_AND | BPF_K)] = {
1151 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1152 .off = { .min = 0, .max = 0},
1153 .imm = { .min = 0, .max = UINT32_MAX,},
1156 [(BPF_ALU | BPF_OR | BPF_K)] = {
1157 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1158 .off = { .min = 0, .max = 0},
1159 .imm = { .min = 0, .max = UINT32_MAX,},
1162 [(BPF_ALU | BPF_LSH | BPF_K)] = {
1163 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1164 .off = { .min = 0, .max = 0},
1165 .imm = { .min = 0, .max = UINT32_MAX,},
1168 [(BPF_ALU | BPF_RSH | BPF_K)] = {
1169 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1170 .off = { .min = 0, .max = 0},
1171 .imm = { .min = 0, .max = UINT32_MAX,},
1174 [(BPF_ALU | BPF_XOR | BPF_K)] = {
1175 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1176 .off = { .min = 0, .max = 0},
1177 .imm = { .min = 0, .max = UINT32_MAX,},
1180 [(BPF_ALU | BPF_MUL | BPF_K)] = {
1181 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1182 .off = { .min = 0, .max = 0},
1183 .imm = { .min = 0, .max = UINT32_MAX,},
1186 [(BPF_ALU | EBPF_MOV | BPF_K)] = {
1187 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1188 .off = { .min = 0, .max = 0},
1189 .imm = { .min = 0, .max = UINT32_MAX,},
1192 [(BPF_ALU | BPF_DIV | BPF_K)] = {
1193 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1194 .off = { .min = 0, .max = 0},
1195 .imm = { .min = 1, .max = UINT32_MAX},
1198 [(BPF_ALU | BPF_MOD | BPF_K)] = {
1199 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1200 .off = { .min = 0, .max = 0},
1201 .imm = { .min = 1, .max = UINT32_MAX},
1204 /* ALU IMM 64-bit instructions */
1205 [(EBPF_ALU64 | BPF_ADD | BPF_K)] = {
1206 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1207 .off = { .min = 0, .max = 0},
1208 .imm = { .min = 0, .max = UINT32_MAX,},
1211 [(EBPF_ALU64 | BPF_SUB | BPF_K)] = {
1212 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1213 .off = { .min = 0, .max = 0},
1214 .imm = { .min = 0, .max = UINT32_MAX,},
1217 [(EBPF_ALU64 | BPF_AND | BPF_K)] = {
1218 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1219 .off = { .min = 0, .max = 0},
1220 .imm = { .min = 0, .max = UINT32_MAX,},
1223 [(EBPF_ALU64 | BPF_OR | BPF_K)] = {
1224 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1225 .off = { .min = 0, .max = 0},
1226 .imm = { .min = 0, .max = UINT32_MAX,},
1229 [(EBPF_ALU64 | BPF_LSH | BPF_K)] = {
1230 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1231 .off = { .min = 0, .max = 0},
1232 .imm = { .min = 0, .max = UINT32_MAX,},
1235 [(EBPF_ALU64 | BPF_RSH | BPF_K)] = {
1236 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1237 .off = { .min = 0, .max = 0},
1238 .imm = { .min = 0, .max = UINT32_MAX,},
1241 [(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {
1242 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1243 .off = { .min = 0, .max = 0},
1244 .imm = { .min = 0, .max = UINT32_MAX,},
1247 [(EBPF_ALU64 | BPF_XOR | BPF_K)] = {
1248 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1249 .off = { .min = 0, .max = 0},
1250 .imm = { .min = 0, .max = UINT32_MAX,},
1253 [(EBPF_ALU64 | BPF_MUL | BPF_K)] = {
1254 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1255 .off = { .min = 0, .max = 0},
1256 .imm = { .min = 0, .max = UINT32_MAX,},
1259 [(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {
1260 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1261 .off = { .min = 0, .max = 0},
1262 .imm = { .min = 0, .max = UINT32_MAX,},
1265 [(EBPF_ALU64 | BPF_DIV | BPF_K)] = {
1266 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1267 .off = { .min = 0, .max = 0},
1268 .imm = { .min = 1, .max = UINT32_MAX},
1271 [(EBPF_ALU64 | BPF_MOD | BPF_K)] = {
1272 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1273 .off = { .min = 0, .max = 0},
1274 .imm = { .min = 1, .max = UINT32_MAX},
1277 /* ALU REG 32-bit instructions */
1278 [(BPF_ALU | BPF_ADD | BPF_X)] = {
1279 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1280 .off = { .min = 0, .max = 0},
1281 .imm = { .min = 0, .max = 0},
1284 [(BPF_ALU | BPF_SUB | BPF_X)] = {
1285 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1286 .off = { .min = 0, .max = 0},
1287 .imm = { .min = 0, .max = 0},
1290 [(BPF_ALU | BPF_AND | BPF_X)] = {
1291 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1292 .off = { .min = 0, .max = 0},
1293 .imm = { .min = 0, .max = 0},
1296 [(BPF_ALU | BPF_OR | BPF_X)] = {
1297 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1298 .off = { .min = 0, .max = 0},
1299 .imm = { .min = 0, .max = 0},
1302 [(BPF_ALU | BPF_LSH | BPF_X)] = {
1303 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1304 .off = { .min = 0, .max = 0},
1305 .imm = { .min = 0, .max = 0},
1308 [(BPF_ALU | BPF_RSH | BPF_X)] = {
1309 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1310 .off = { .min = 0, .max = 0},
1311 .imm = { .min = 0, .max = 0},
1314 [(BPF_ALU | BPF_XOR | BPF_X)] = {
1315 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1316 .off = { .min = 0, .max = 0},
1317 .imm = { .min = 0, .max = 0},
1320 [(BPF_ALU | BPF_MUL | BPF_X)] = {
1321 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1322 .off = { .min = 0, .max = 0},
1323 .imm = { .min = 0, .max = 0},
1326 [(BPF_ALU | BPF_DIV | BPF_X)] = {
1327 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1328 .off = { .min = 0, .max = 0},
1329 .imm = { .min = 0, .max = 0},
1332 [(BPF_ALU | BPF_MOD | BPF_X)] = {
1333 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1334 .off = { .min = 0, .max = 0},
1335 .imm = { .min = 0, .max = 0},
1338 [(BPF_ALU | EBPF_MOV | BPF_X)] = {
1339 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1340 .off = { .min = 0, .max = 0},
1341 .imm = { .min = 0, .max = 0},
1344 [(BPF_ALU | BPF_NEG)] = {
1345 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1346 .off = { .min = 0, .max = 0},
1347 .imm = { .min = 0, .max = 0},
1350 [(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {
1351 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1352 .off = { .min = 0, .max = 0},
1353 .imm = { .min = 16, .max = 64},
1354 .check = check_alu_bele,
1357 [(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {
1358 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1359 .off = { .min = 0, .max = 0},
1360 .imm = { .min = 16, .max = 64},
1361 .check = check_alu_bele,
1364 /* ALU REG 64-bit instructions */
1365 [(EBPF_ALU64 | BPF_ADD | BPF_X)] = {
1366 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1367 .off = { .min = 0, .max = 0},
1368 .imm = { .min = 0, .max = 0},
1371 [(EBPF_ALU64 | BPF_SUB | BPF_X)] = {
1372 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1373 .off = { .min = 0, .max = 0},
1374 .imm = { .min = 0, .max = 0},
1377 [(EBPF_ALU64 | BPF_AND | BPF_X)] = {
1378 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1379 .off = { .min = 0, .max = 0},
1380 .imm = { .min = 0, .max = 0},
1383 [(EBPF_ALU64 | BPF_OR | BPF_X)] = {
1384 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1385 .off = { .min = 0, .max = 0},
1386 .imm = { .min = 0, .max = 0},
1389 [(EBPF_ALU64 | BPF_LSH | BPF_X)] = {
1390 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1391 .off = { .min = 0, .max = 0},
1392 .imm = { .min = 0, .max = 0},
1395 [(EBPF_ALU64 | BPF_RSH | BPF_X)] = {
1396 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1397 .off = { .min = 0, .max = 0},
1398 .imm = { .min = 0, .max = 0},
1401 [(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {
1402 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1403 .off = { .min = 0, .max = 0},
1404 .imm = { .min = 0, .max = 0},
1407 [(EBPF_ALU64 | BPF_XOR | BPF_X)] = {
1408 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1409 .off = { .min = 0, .max = 0},
1410 .imm = { .min = 0, .max = 0},
1413 [(EBPF_ALU64 | BPF_MUL | BPF_X)] = {
1414 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1415 .off = { .min = 0, .max = 0},
1416 .imm = { .min = 0, .max = 0},
1419 [(EBPF_ALU64 | BPF_DIV | BPF_X)] = {
1420 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1421 .off = { .min = 0, .max = 0},
1422 .imm = { .min = 0, .max = 0},
1425 [(EBPF_ALU64 | BPF_MOD | BPF_X)] = {
1426 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1427 .off = { .min = 0, .max = 0},
1428 .imm = { .min = 0, .max = 0},
1431 [(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {
1432 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1433 .off = { .min = 0, .max = 0},
1434 .imm = { .min = 0, .max = 0},
1437 [(EBPF_ALU64 | BPF_NEG)] = {
1438 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1439 .off = { .min = 0, .max = 0},
1440 .imm = { .min = 0, .max = 0},
1443 /* load instructions */
1444 [(BPF_LDX | BPF_MEM | BPF_B)] = {
1445 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1446 .off = { .min = 0, .max = UINT16_MAX},
1447 .imm = { .min = 0, .max = 0},
1450 [(BPF_LDX | BPF_MEM | BPF_H)] = {
1451 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1452 .off = { .min = 0, .max = UINT16_MAX},
1453 .imm = { .min = 0, .max = 0},
1456 [(BPF_LDX | BPF_MEM | BPF_W)] = {
1457 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1458 .off = { .min = 0, .max = UINT16_MAX},
1459 .imm = { .min = 0, .max = 0},
1462 [(BPF_LDX | BPF_MEM | EBPF_DW)] = {
1463 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1464 .off = { .min = 0, .max = UINT16_MAX},
1465 .imm = { .min = 0, .max = 0},
1468 /* load 64 bit immediate value */
1469 [(BPF_LD | BPF_IMM | EBPF_DW)] = {
1470 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1471 .off = { .min = 0, .max = 0},
1472 .imm = { .min = 0, .max = UINT32_MAX},
1473 .eval = eval_ld_imm64,
1475 /* load absolute instructions */
1476 [(BPF_LD | BPF_ABS | BPF_B)] = {
1477 .mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
1478 .off = { .min = 0, .max = 0},
1479 .imm = { .min = 0, .max = INT32_MAX},
1480 .eval = eval_ld_mbuf,
1482 [(BPF_LD | BPF_ABS | BPF_H)] = {
1483 .mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
1484 .off = { .min = 0, .max = 0},
1485 .imm = { .min = 0, .max = INT32_MAX},
1486 .eval = eval_ld_mbuf,
1488 [(BPF_LD | BPF_ABS | BPF_W)] = {
1489 .mask = {. dreg = ZERO_REG, .sreg = ZERO_REG},
1490 .off = { .min = 0, .max = 0},
1491 .imm = { .min = 0, .max = INT32_MAX},
1492 .eval = eval_ld_mbuf,
1494 /* load indirect instructions */
1495 [(BPF_LD | BPF_IND | BPF_B)] = {
1496 .mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1497 .off = { .min = 0, .max = 0},
1498 .imm = { .min = 0, .max = UINT32_MAX},
1499 .eval = eval_ld_mbuf,
1501 [(BPF_LD | BPF_IND | BPF_H)] = {
1502 .mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1503 .off = { .min = 0, .max = 0},
1504 .imm = { .min = 0, .max = UINT32_MAX},
1505 .eval = eval_ld_mbuf,
1507 [(BPF_LD | BPF_IND | BPF_W)] = {
1508 .mask = {. dreg = ZERO_REG, .sreg = IND_SRC_REGS},
1509 .off = { .min = 0, .max = 0},
1510 .imm = { .min = 0, .max = UINT32_MAX},
1511 .eval = eval_ld_mbuf,
1513 /* store REG instructions */
1514 [(BPF_STX | BPF_MEM | BPF_B)] = {
1515 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1516 .off = { .min = 0, .max = UINT16_MAX},
1517 .imm = { .min = 0, .max = 0},
1520 [(BPF_STX | BPF_MEM | BPF_H)] = {
1521 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1522 .off = { .min = 0, .max = UINT16_MAX},
1523 .imm = { .min = 0, .max = 0},
1526 [(BPF_STX | BPF_MEM | BPF_W)] = {
1527 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1528 .off = { .min = 0, .max = UINT16_MAX},
1529 .imm = { .min = 0, .max = 0},
1532 [(BPF_STX | BPF_MEM | EBPF_DW)] = {
1533 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1534 .off = { .min = 0, .max = UINT16_MAX},
1535 .imm = { .min = 0, .max = 0},
1538 /* atomic add instructions */
1539 [(BPF_STX | EBPF_XADD | BPF_W)] = {
1540 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1541 .off = { .min = 0, .max = UINT16_MAX},
1542 .imm = { .min = 0, .max = 0},
1545 [(BPF_STX | EBPF_XADD | EBPF_DW)] = {
1546 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1547 .off = { .min = 0, .max = UINT16_MAX},
1548 .imm = { .min = 0, .max = 0},
1551 /* store IMM instructions */
1552 [(BPF_ST | BPF_MEM | BPF_B)] = {
1553 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1554 .off = { .min = 0, .max = UINT16_MAX},
1555 .imm = { .min = 0, .max = UINT32_MAX},
1558 [(BPF_ST | BPF_MEM | BPF_H)] = {
1559 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1560 .off = { .min = 0, .max = UINT16_MAX},
1561 .imm = { .min = 0, .max = UINT32_MAX},
1564 [(BPF_ST | BPF_MEM | BPF_W)] = {
1565 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1566 .off = { .min = 0, .max = UINT16_MAX},
1567 .imm = { .min = 0, .max = UINT32_MAX},
1570 [(BPF_ST | BPF_MEM | EBPF_DW)] = {
1571 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1572 .off = { .min = 0, .max = UINT16_MAX},
1573 .imm = { .min = 0, .max = UINT32_MAX},
1576 /* jump instruction */
1577 [(BPF_JMP | BPF_JA)] = {
1578 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1579 .off = { .min = 0, .max = UINT16_MAX},
1580 .imm = { .min = 0, .max = 0},
1582 /* jcc IMM instructions */
1583 [(BPF_JMP | BPF_JEQ | BPF_K)] = {
1584 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1585 .off = { .min = 0, .max = UINT16_MAX},
1586 .imm = { .min = 0, .max = UINT32_MAX},
1589 [(BPF_JMP | EBPF_JNE | BPF_K)] = {
1590 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1591 .off = { .min = 0, .max = UINT16_MAX},
1592 .imm = { .min = 0, .max = UINT32_MAX},
1595 [(BPF_JMP | BPF_JGT | BPF_K)] = {
1596 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1597 .off = { .min = 0, .max = UINT16_MAX},
1598 .imm = { .min = 0, .max = UINT32_MAX},
1601 [(BPF_JMP | EBPF_JLT | BPF_K)] = {
1602 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1603 .off = { .min = 0, .max = UINT16_MAX},
1604 .imm = { .min = 0, .max = UINT32_MAX},
1607 [(BPF_JMP | BPF_JGE | BPF_K)] = {
1608 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1609 .off = { .min = 0, .max = UINT16_MAX},
1610 .imm = { .min = 0, .max = UINT32_MAX},
1613 [(BPF_JMP | EBPF_JLE | BPF_K)] = {
1614 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1615 .off = { .min = 0, .max = UINT16_MAX},
1616 .imm = { .min = 0, .max = UINT32_MAX},
1619 [(BPF_JMP | EBPF_JSGT | BPF_K)] = {
1620 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1621 .off = { .min = 0, .max = UINT16_MAX},
1622 .imm = { .min = 0, .max = UINT32_MAX},
1625 [(BPF_JMP | EBPF_JSLT | BPF_K)] = {
1626 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1627 .off = { .min = 0, .max = UINT16_MAX},
1628 .imm = { .min = 0, .max = UINT32_MAX},
1631 [(BPF_JMP | EBPF_JSGE | BPF_K)] = {
1632 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1633 .off = { .min = 0, .max = UINT16_MAX},
1634 .imm = { .min = 0, .max = UINT32_MAX},
1637 [(BPF_JMP | EBPF_JSLE | BPF_K)] = {
1638 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1639 .off = { .min = 0, .max = UINT16_MAX},
1640 .imm = { .min = 0, .max = UINT32_MAX},
1643 [(BPF_JMP | BPF_JSET | BPF_K)] = {
1644 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1645 .off = { .min = 0, .max = UINT16_MAX},
1646 .imm = { .min = 0, .max = UINT32_MAX},
1649 /* jcc REG instructions */
1650 [(BPF_JMP | BPF_JEQ | BPF_X)] = {
1651 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1652 .off = { .min = 0, .max = UINT16_MAX},
1653 .imm = { .min = 0, .max = 0},
1656 [(BPF_JMP | EBPF_JNE | BPF_X)] = {
1657 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1658 .off = { .min = 0, .max = UINT16_MAX},
1659 .imm = { .min = 0, .max = 0},
1662 [(BPF_JMP | BPF_JGT | BPF_X)] = {
1663 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1664 .off = { .min = 0, .max = UINT16_MAX},
1665 .imm = { .min = 0, .max = 0},
1668 [(BPF_JMP | EBPF_JLT | BPF_X)] = {
1669 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1670 .off = { .min = 0, .max = UINT16_MAX},
1671 .imm = { .min = 0, .max = 0},
1674 [(BPF_JMP | BPF_JGE | BPF_X)] = {
1675 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1676 .off = { .min = 0, .max = UINT16_MAX},
1677 .imm = { .min = 0, .max = 0},
1680 [(BPF_JMP | EBPF_JLE | BPF_X)] = {
1681 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1682 .off = { .min = 0, .max = UINT16_MAX},
1683 .imm = { .min = 0, .max = 0},
1686 [(BPF_JMP | EBPF_JSGT | BPF_X)] = {
1687 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1688 .off = { .min = 0, .max = UINT16_MAX},
1689 .imm = { .min = 0, .max = 0},
1692 [(BPF_JMP | EBPF_JSLT | BPF_X)] = {
1693 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1694 .off = { .min = 0, .max = UINT16_MAX},
1695 .imm = { .min = 0, .max = 0},
1697 [(BPF_JMP | EBPF_JSGE | BPF_X)] = {
1698 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1699 .off = { .min = 0, .max = UINT16_MAX},
1700 .imm = { .min = 0, .max = 0},
1703 [(BPF_JMP | EBPF_JSLE | BPF_X)] = {
1704 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1705 .off = { .min = 0, .max = UINT16_MAX},
1706 .imm = { .min = 0, .max = 0},
1709 [(BPF_JMP | BPF_JSET | BPF_X)] = {
1710 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1711 .off = { .min = 0, .max = UINT16_MAX},
1712 .imm = { .min = 0, .max = 0},
1715 /* call instruction */
1716 [(BPF_JMP | EBPF_CALL)] = {
1717 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1718 .off = { .min = 0, .max = 0},
1719 .imm = { .min = 0, .max = UINT32_MAX},
1722 /* ret instruction */
1723 [(BPF_JMP | EBPF_EXIT)] = {
1724 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1725 .off = { .min = 0, .max = 0},
1726 .imm = { .min = 0, .max = 0},
1732 * make sure that instruction syntax is valid,
1733 * and its fields don't violate particular instruction type restrictions.
1736 check_syntax(const struct ebpf_insn *ins)
1745 if (ins_chk[op].mask.dreg == 0)
1746 return "invalid opcode";
1748 if ((ins_chk[op].mask.dreg & 1 << ins->dst_reg) == 0)
1749 return "invalid dst-reg field";
1751 if ((ins_chk[op].mask.sreg & 1 << ins->src_reg) == 0)
1752 return "invalid src-reg field";
1755 if (ins_chk[op].off.min > off || ins_chk[op].off.max < off)
1756 return "invalid off field";
1759 if (ins_chk[op].imm.min > imm || ins_chk[op].imm.max < imm)
1760 return "invalid imm field";
1762 if (ins_chk[op].check != NULL)
1763 return ins_chk[op].check(ins);
1769 * helper function, return instruction index for the given node.
1772 get_node_idx(const struct bpf_verifier *bvf, const struct inst_node *node)
1774 return node - bvf->in;
1778 * helper function, used to walk through constructed CFG.
1780 static struct inst_node *
1781 get_next_node(struct bpf_verifier *bvf, struct inst_node *node)
1783 uint32_t ce, ne, dst;
1786 ce = node->cur_edge;
1791 dst = node->edge_dest[ce];
1792 return bvf->in + dst;
1796 set_node_colour(struct bpf_verifier *bvf, struct inst_node *node,
1801 prev = node->colour;
1804 bvf->node_colour[prev]--;
1805 bvf->node_colour[new]++;
1809 * helper function, add new edge between two nodes.
1812 add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
1816 if (nidx > bvf->prm->nb_ins) {
1817 RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, "
1819 __func__, get_node_idx(bvf, node), nidx);
1824 if (ne >= RTE_DIM(node->edge_dest)) {
1825 RTE_BPF_LOG(ERR, "%s: internal error at pc: %u\n",
1826 __func__, get_node_idx(bvf, node));
1830 node->edge_dest[ne] = nidx;
1831 node->nb_edge = ne + 1;
1836 * helper function, determine type of edge between two nodes.
1839 set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
1840 const struct inst_node *next)
1842 uint32_t ce, clr, type;
1844 ce = node->cur_edge - 1;
1847 type = UNKNOWN_EDGE;
1851 else if (clr == GREY)
1853 else if (clr == BLACK)
1855 * in fact it could be either direct or cross edge,
1856 * but for now, we don't need to distinguish between them.
1860 node->edge_type[ce] = type;
1861 bvf->edge_type[type]++;
1864 static struct inst_node *
1865 get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
1867 return bvf->in + node->prev_node;
1871 * Depth-First Search (DFS) through previously constructed
1872 * Control Flow Graph (CFG).
1873 * Information collected at this path would be used later
1874 * to determine is there any loops, and/or unreachable instructions.
1877 dfs(struct bpf_verifier *bvf)
1879 struct inst_node *next, *node;
1882 while (node != NULL) {
1884 if (node->colour == WHITE)
1885 set_node_colour(bvf, node, GREY);
1887 if (node->colour == GREY) {
1889 /* find next unprocessed child node */
1891 next = get_next_node(bvf, node);
1894 set_edge_type(bvf, node, next);
1895 } while (next->colour != WHITE);
1898 /* proceed with next child */
1899 next->prev_node = get_node_idx(bvf, node);
1903 * finished with current node and all it's kids,
1904 * proceed with parent
1906 set_node_colour(bvf, node, BLACK);
1908 node = get_prev_node(bvf, node);
1916 * report unreachable instructions.
1919 log_unreachable(const struct bpf_verifier *bvf)
1922 struct inst_node *node;
1923 const struct ebpf_insn *ins;
1925 for (i = 0; i != bvf->prm->nb_ins; i++) {
1928 ins = bvf->prm->ins + i;
1930 if (node->colour == WHITE &&
1931 ins->code != (BPF_LD | BPF_IMM | EBPF_DW))
1932 RTE_BPF_LOG(ERR, "unreachable code at pc: %u;\n", i);
1937 * report loops detected.
1940 log_loop(const struct bpf_verifier *bvf)
1943 struct inst_node *node;
1945 for (i = 0; i != bvf->prm->nb_ins; i++) {
1948 if (node->colour != BLACK)
1951 for (j = 0; j != node->nb_edge; j++) {
1952 if (node->edge_type[j] == BACK_EDGE)
1954 "loop at pc:%u --> pc:%u;\n",
1955 i, node->edge_dest[j]);
1961 * First pass goes though all instructions in the set, checks that each
1962 * instruction is a valid one (correct syntax, valid field values, etc.)
1963 * and constructs control flow graph (CFG).
1964 * Then depth-first search is performed over the constructed graph.
1965 * Programs with unreachable instructions and/or loops will be rejected.
1968 validate(struct bpf_verifier *bvf)
1972 struct inst_node *node;
1973 const struct ebpf_insn *ins;
1977 for (i = 0; i < bvf->prm->nb_ins; i++) {
1979 ins = bvf->prm->ins + i;
1982 err = check_syntax(ins);
1984 RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
1990 * construct CFG, jcc nodes have to outgoing edges,
1991 * 'exit' nodes - none, all other nodes have exactly one
1994 switch (ins->code) {
1995 case (BPF_JMP | EBPF_EXIT):
1997 case (BPF_JMP | BPF_JEQ | BPF_K):
1998 case (BPF_JMP | EBPF_JNE | BPF_K):
1999 case (BPF_JMP | BPF_JGT | BPF_K):
2000 case (BPF_JMP | EBPF_JLT | BPF_K):
2001 case (BPF_JMP | BPF_JGE | BPF_K):
2002 case (BPF_JMP | EBPF_JLE | BPF_K):
2003 case (BPF_JMP | EBPF_JSGT | BPF_K):
2004 case (BPF_JMP | EBPF_JSLT | BPF_K):
2005 case (BPF_JMP | EBPF_JSGE | BPF_K):
2006 case (BPF_JMP | EBPF_JSLE | BPF_K):
2007 case (BPF_JMP | BPF_JSET | BPF_K):
2008 case (BPF_JMP | BPF_JEQ | BPF_X):
2009 case (BPF_JMP | EBPF_JNE | BPF_X):
2010 case (BPF_JMP | BPF_JGT | BPF_X):
2011 case (BPF_JMP | EBPF_JLT | BPF_X):
2012 case (BPF_JMP | BPF_JGE | BPF_X):
2013 case (BPF_JMP | EBPF_JLE | BPF_X):
2014 case (BPF_JMP | EBPF_JSGT | BPF_X):
2015 case (BPF_JMP | EBPF_JSLT | BPF_X):
2016 case (BPF_JMP | EBPF_JSGE | BPF_X):
2017 case (BPF_JMP | EBPF_JSLE | BPF_X):
2018 case (BPF_JMP | BPF_JSET | BPF_X):
2019 rc |= add_edge(bvf, node, i + ins->off + 1);
2020 rc |= add_edge(bvf, node, i + 1);
2021 bvf->nb_jcc_nodes++;
2023 case (BPF_JMP | BPF_JA):
2024 rc |= add_edge(bvf, node, i + ins->off + 1);
2026 /* load 64 bit immediate value */
2027 case (BPF_LD | BPF_IMM | EBPF_DW):
2028 rc |= add_edge(bvf, node, i + 2);
2031 case (BPF_LD | BPF_ABS | BPF_B):
2032 case (BPF_LD | BPF_ABS | BPF_H):
2033 case (BPF_LD | BPF_ABS | BPF_W):
2034 case (BPF_LD | BPF_IND | BPF_B):
2035 case (BPF_LD | BPF_IND | BPF_H):
2036 case (BPF_LD | BPF_IND | BPF_W):
2037 bvf->nb_ldmb_nodes++;
2040 rc |= add_edge(bvf, node, i + 1);
2045 bvf->node_colour[WHITE]++;
2053 RTE_BPF_LOG(DEBUG, "%s(%p) stats:\n"
2055 "nb_jcc_nodes=%u;\n"
2056 "node_color={[WHITE]=%u, [GREY]=%u,, [BLACK]=%u};\n"
2057 "edge_type={[UNKNOWN]=%u, [TREE]=%u, [BACK]=%u, [CROSS]=%u};\n",
2061 bvf->node_colour[WHITE], bvf->node_colour[GREY],
2062 bvf->node_colour[BLACK],
2063 bvf->edge_type[UNKNOWN_EDGE], bvf->edge_type[TREE_EDGE],
2064 bvf->edge_type[BACK_EDGE], bvf->edge_type[CROSS_EDGE]);
2066 if (bvf->node_colour[BLACK] != bvf->nb_nodes) {
2067 RTE_BPF_LOG(ERR, "%s(%p) unreachable instructions;\n",
2069 log_unreachable(bvf);
2073 if (bvf->node_colour[GREY] != 0 || bvf->node_colour[WHITE] != 0 ||
2074 bvf->edge_type[UNKNOWN_EDGE] != 0) {
2075 RTE_BPF_LOG(ERR, "%s(%p) DFS internal error;\n",
2080 if (bvf->edge_type[BACK_EDGE] != 0) {
2081 RTE_BPF_LOG(ERR, "%s(%p) loops detected;\n",
2091 * helper functions get/free eval states.
2093 static struct bpf_eval_state *
2094 pull_eval_state(struct bpf_verifier *bvf)
2098 n = bvf->evst_pool.cur;
2099 if (n == bvf->evst_pool.num)
2102 bvf->evst_pool.cur = n + 1;
2103 return bvf->evst_pool.ent + n;
2107 push_eval_state(struct bpf_verifier *bvf)
2109 bvf->evst_pool.cur--;
2113 evst_pool_fini(struct bpf_verifier *bvf)
2116 free(bvf->evst_pool.ent);
2117 memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool));
2121 evst_pool_init(struct bpf_verifier *bvf)
2125 n = bvf->nb_jcc_nodes + 1;
2127 bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0]));
2128 if (bvf->evst_pool.ent == NULL)
2131 bvf->evst_pool.num = n;
2132 bvf->evst_pool.cur = 0;
2134 bvf->evst = pull_eval_state(bvf);
2139 * Save current eval state.
2142 save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2144 struct bpf_eval_state *st;
2146 /* get new eval_state for this node */
2147 st = pull_eval_state(bvf);
2150 "%s: internal error (out of space) at pc: %u\n",
2151 __func__, get_node_idx(bvf, node));
2155 /* make a copy of current state */
2156 memcpy(st, bvf->evst, sizeof(*st));
2158 /* swap current state with new one */
2159 node->evst = bvf->evst;
2162 RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
2163 __func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);
2169 * Restore previous eval state and mark current eval state as free.
2172 restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2174 RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
2175 __func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);
2177 bvf->evst = node->evst;
2179 push_eval_state(bvf);
2183 log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
2184 uint32_t pc, int32_t loglvl)
2186 const struct bpf_eval_state *st;
2187 const struct bpf_reg_val *rv;
2189 rte_log(loglvl, rte_bpf_logtype, "%s(pc=%u):\n", __func__, pc);
2192 rv = st->rv + ins->dst_reg;
2194 rte_log(loglvl, rte_bpf_logtype,
2196 "\tv={type=%u, size=%zu},\n"
2197 "\tmask=0x%" PRIx64 ",\n"
2198 "\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n"
2199 "\ts={min=%" PRId64 ", max=%" PRId64 "},\n"
2202 rv->v.type, rv->v.size,
2204 rv->u.min, rv->u.max,
2205 rv->s.min, rv->s.max);
2209 * Do second pass through CFG and try to evaluate instructions
2210 * via each possible path.
2211 * Right now evaluation functionality is quite limited.
2212 * Still need to add extra checks for:
2213 * - use/return uninitialized registers.
2214 * - use uninitialized data from the stack.
2215 * - memory boundaries violation.
2218 evaluate(struct bpf_verifier *bvf)
2223 const struct ebpf_insn *ins;
2224 struct inst_node *next, *node;
2226 /* initial state of frame pointer */
2227 static const struct bpf_reg_val rvfp = {
2229 .type = BPF_ARG_PTR_STACK,
2230 .size = MAX_BPF_STACK_SIZE,
2233 .u = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
2234 .s = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
2237 bvf->evst->rv[EBPF_REG_1].v = bvf->prm->prog_arg;
2238 bvf->evst->rv[EBPF_REG_1].mask = UINT64_MAX;
2239 if (bvf->prm->prog_arg.type == RTE_BPF_ARG_RAW)
2240 eval_max_bound(bvf->evst->rv + EBPF_REG_1, UINT64_MAX);
2242 bvf->evst->rv[EBPF_REG_10] = rvfp;
2244 ins = bvf->prm->ins;
2249 while (node != NULL && rc == 0) {
2252 * current node evaluation, make sure we evaluate
2253 * each node only once.
2258 idx = get_node_idx(bvf, node);
2261 /* for jcc node make a copy of evaluation state */
2262 if (node->nb_edge > 1)
2263 rc |= save_eval_state(bvf, node);
2265 if (ins_chk[op].eval != NULL && rc == 0) {
2266 err = ins_chk[op].eval(bvf, ins + idx);
2268 RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
2269 __func__, err, idx);
2274 log_eval_state(bvf, ins + idx, idx, RTE_LOG_DEBUG);
2278 /* proceed through CFG */
2279 next = get_next_node(bvf, node);
2282 /* proceed with next child */
2283 if (node->cur_edge == node->nb_edge &&
2285 restore_eval_state(bvf, node);
2287 next->prev_node = get_node_idx(bvf, node);
2291 * finished with current node and all it's kids,
2292 * proceed with parent
2295 node = get_prev_node(bvf, node);
2298 if (node == bvf->in)
2307 bpf_validate(struct rte_bpf *bpf)
2310 struct bpf_verifier bvf;
2312 /* check input argument type, don't allow mbuf ptr on 32-bit */
2313 if (bpf->prm.prog_arg.type != RTE_BPF_ARG_RAW &&
2314 bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR &&
2315 (sizeof(uint64_t) != sizeof(uintptr_t) ||
2316 bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR_MBUF)) {
2317 RTE_BPF_LOG(ERR, "%s: unsupported argument type\n", __func__);
2321 memset(&bvf, 0, sizeof(bvf));
2322 bvf.prm = &bpf->prm;
2323 bvf.in = calloc(bpf->prm.nb_ins, sizeof(bvf.in[0]));
2327 rc = validate(&bvf);
2330 rc = evst_pool_init(&bvf);
2332 rc = evaluate(&bvf);
2333 evst_pool_fini(&bvf);
2338 /* copy collected info */
2340 bpf->stack_sz = bvf.stack_sz;
2342 /* for LD_ABS/LD_IND, we'll need extra space on the stack */
2343 if (bvf.nb_ldmb_nodes != 0)
2344 bpf->stack_sz = RTE_ALIGN_CEIL(bpf->stack_sz +
2345 sizeof(uint64_t), sizeof(uint64_t));