1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
12 #include <rte_common.h>
14 #include <rte_byteorder.h>
/* internal-only marker for a pointer into the BPF stack: reuses the reserved arg type */
18 #define BPF_ARG_PTR_STACK RTE_BPF_ARG_RESERVED
/*
 * Per-node evaluation state tracked by the verifier:
 * rv[] - value-range estimate for every eBPF register,
 * sv[] - value-range estimate for every 64-bit slot of the BPF stack.
 * NOTE(review): extracted fragment - trailing members/closing brace elided here.
 */
33 struct bpf_eval_state {
34 struct bpf_reg_val rv[EBPF_REG_NUM];
35 struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
38 /* possible instruction node colour */
46 /* possible edge types */
/* NOTE(review): fields below appear to belong to the per-instruction CFG node
 * (presumably struct inst_node) - enclosing definitions elided in this extract.
 */
61 uint8_t edge_type[MAX_EDGES];
62 uint32_t edge_dest[MAX_EDGES];
/* evaluation-state snapshot attached to this node */
64 struct bpf_eval_state *evst;
/* NOTE(review): fields below look like the verifier context (struct bpf_verifier):
 * input program, CFG statistics, current eval state and node - confirm in full file.
 */
68 const struct rte_bpf_prm *prm;
72 uint32_t nb_jcc_nodes;
73 uint32_t node_colour[MAX_NODE_COLOUR];
74 uint32_t edge_type[MAX_EDGE_TYPE];
75 struct bpf_eval_state *evst;
76 struct inst_node *evin;
/* NOTE(review): presumably an entry of an eval-state free pool - confirm */
80 struct bpf_eval_state *ent;
/*
 * Per-opcode validation descriptor (one entry per instruction code):
 * check() - optional static sanity check of instruction fields,
 * eval()  - optional semantic evaluation hook run during verification;
 * both return NULL on success or a human-readable error string.
 * NOTE(review): mask/off/imm range members elided in this extract.
 */
84 struct bpf_ins_check {
97 const char * (*check)(const struct ebpf_insn *);
98 const char * (*eval)(struct bpf_verifier *, const struct ebpf_insn *);
/* bit-mask of all registers, of writable registers (R0-R9), and the empty set */
101 #define ALL_REGS RTE_LEN2MASK(EBPF_REG_NUM, uint16_t)
102 #define WRT_REGS RTE_LEN2MASK(EBPF_REG_10, uint16_t)
103 #define ZERO_REG RTE_LEN2MASK(EBPF_REG_1, uint16_t)
106 * check and evaluate functions for particular instruction types.
/* byte-swap (TO_BE/TO_LE) width must be exactly 16, 32 or 64 bits */
110 check_alu_bele(const struct ebpf_insn *ins)
112 if (ins->imm != 16 && ins->imm != 32 && ins->imm != 64)
113 return "invalid imm field";
/* on EXIT, R0 must hold a defined return value */
118 eval_exit(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
121 if (bvf->evst->rv[EBPF_REG_0].v.type == RTE_BPF_ARG_UNDEF)
122 return "undefined return value";
126 /* setup max possible with this mask bounds */
128 eval_umax_bound(struct bpf_reg_val *rv, uint64_t mask)
/* widest signed range representable under 'mask': [-(mask/2+1), mask/2] */
135 eval_smax_bound(struct bpf_reg_val *rv, uint64_t mask)
137 rv->s.max = mask >> 1;
138 rv->s.min = rv->s.max ^ UINT64_MAX;
/* reset both unsigned and signed ranges to widest possible */
142 eval_max_bound(struct bpf_reg_val *rv, uint64_t mask)
144 eval_umax_bound(rv, mask);
145 eval_smax_bound(rv, mask);
/* widest bounds plus mark the value as a plain (RAW) scalar */
149 eval_fill_max_bound(struct bpf_reg_val *rv, uint64_t mask)
151 eval_max_bound(rv, mask);
152 rv->v.type = RTE_BPF_ARG_RAW;
/* set register to an exactly-known 64-bit constant (min == max == val) */
157 eval_fill_imm64(struct bpf_reg_val *rv, uint64_t mask, uint64_t val)
/* set register to a sign-extended-then-masked 32-bit immediate, typed RAW */
167 eval_fill_imm(struct bpf_reg_val *rv, uint64_t mask, int32_t imm)
171 v = (uint64_t)imm & mask;
173 rv->v.type = RTE_BPF_ARG_RAW;
174 eval_fill_imm64(rv, mask, v);
/*
 * LD_IMM64: combine the two 32-bit imm halves into one 64-bit constant.
 * If the constant matches the address of a registered external variable,
 * re-type the destination register as that variable's descriptor
 * (offset 0 into it) instead of a raw constant.
 */
178 eval_ld_imm64(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
182 struct bpf_reg_val *rd;
184 val = (uint32_t)ins[0].imm | (uint64_t)(uint32_t)ins[1].imm << 32;
186 rd = bvf->evst->rv + ins->dst_reg;
187 rd->v.type = RTE_BPF_ARG_RAW;
188 eval_fill_imm64(rd, UINT64_MAX, val);
190 for (i = 0; i != bvf->prm->nb_xsym; i++) {
192 /* load of external variable */
193 if (bvf->prm->xsym[i].type == RTE_BPF_XTYPE_VAR &&
194 (uintptr_t)bvf->prm->xsym[i].var.val == val) {
195 rd->v = bvf->prm->xsym[i].var.desc;
196 eval_fill_imm64(rd, UINT64_MAX, 0);
/*
 * Narrow a register's bounds to an operand-size mask.
 * If masking changes either unsigned bound the value may have wrapped,
 * so fall back to the conservative full range [0, mask]; the signed
 * bounds are then intersected with the widest signed range under mask.
 */
205 eval_apply_mask(struct bpf_reg_val *rv, uint64_t mask)
207 struct bpf_reg_val rt;
209 rt.u.min = rv->u.min & mask;
210 rt.u.max = rv->u.max & mask;
211 if (rt.u.min != rv->u.min || rt.u.max != rv->u.max) {
/* RTE_MAX(rt.u.max, mask) == mask here: widest possible unsigned max */
212 rv->u.max = RTE_MAX(rt.u.max, mask);
216 eval_smax_bound(&rt, mask);
217 rv->s.max = RTE_MIN(rt.s.max, rv->s.max);
218 rv->s.min = RTE_MAX(rt.s.min, rv->s.min);
224 eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
226 struct bpf_reg_val rv;
228 rv.u.min = (rd->u.min + rs->u.min) & msk;
229 rv.u.max = (rd->u.min + rs->u.max) & msk;
230 rv.s.min = (rd->s.min + rs->s.min) & msk;
231 rv.s.max = (rd->s.max + rs->s.max) & msk;
234 * if at least one of the operands is not constant,
235 * then check for overflow
237 if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
238 (rv.u.min < rd->u.min || rv.u.max < rd->u.max))
239 eval_umax_bound(&rv, msk);
241 if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
242 (((rs->s.min < 0 && rv.s.min > rd->s.min) ||
243 rv.s.min < rd->s.min) ||
244 ((rs->s.max < 0 && rv.s.max > rd->s.max) ||
245 rv.s.max < rd->s.max)))
246 eval_smax_bound(&rv, msk);
253 eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
255 struct bpf_reg_val rv;
257 rv.u.min = (rd->u.min - rs->u.min) & msk;
258 rv.u.max = (rd->u.min - rs->u.max) & msk;
259 rv.s.min = (rd->s.min - rs->s.min) & msk;
260 rv.s.max = (rd->s.max - rs->s.max) & msk;
263 * if at least one of the operands is not constant,
264 * then check for overflow
266 if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
267 (rv.u.min > rd->u.min || rv.u.max > rd->u.max))
268 eval_umax_bound(&rv, msk);
270 if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
271 (((rs->s.min < 0 && rv.s.min < rd->s.min) ||
272 rv.s.min > rd->s.min) ||
273 ((rs->s.max < 0 && rv.s.max < rd->s.max) ||
274 rv.s.max > rd->s.max)))
275 eval_smax_bound(&rv, msk);
/*
 * Evaluate bounds for left shift: rd <<= rs, operand width opsz bits.
 * A shift count >= operand width gives an unpredictable result, so
 * the range collapses to the widest possible.
 */
282 eval_lsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
285 /* check if shift value is less than max result bits */
286 if (rs->u.max >= opsz) {
287 eval_max_bound(rd, msk);
291 /* check for overflow */
292 if (rd->u.max > RTE_LEN2MASK(opsz - rs->u.max, uint64_t))
293 eval_umax_bound(rd, msk);
295 rd->u.max <<= rs->u.max;
296 rd->u.min <<= rs->u.min;
299 /* check that dreg values are and would remain always positive */
300 if ((uint64_t)rd->s.min >> (opsz - 1) != 0 || rd->s.max >=
301 RTE_LEN2MASK(opsz - rs->u.max - 1, int64_t))
302 eval_smax_bound(rd, msk);
304 rd->s.max <<= rs->u.max;
305 rd->s.min <<= rs->u.min;
/*
 * Evaluate bounds for logical right shift: rd >>= rs, width opsz bits.
 * max shifts by the smallest count, min by the largest; negative signed
 * ranges can't be tracked through a logical shift, so they are widened.
 */
310 eval_rsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
313 /* check if shift value is less than max result bits */
314 if (rs->u.max >= opsz) {
315 eval_max_bound(rd, msk);
319 rd->u.max >>= rs->u.min;
320 rd->u.min >>= rs->u.max;
322 /* check that dreg values are always positive */
323 if ((uint64_t)rd->s.min >> (opsz - 1) != 0)
324 eval_smax_bound(rd, msk);
326 rd->s.max >>= rs->u.min;
327 rd->s.min >>= rs->u.max;
/*
 * Evaluate bounds for arithmetic (sign-propagating) right shift.
 * Unsigned bounds are shifted as signed 64-bit values; for 32-bit ops
 * an extra shift amount ('shv', elided here) first sign-extends to 64 bit.
 * Which bound shifts by min vs max count depends on the bound's sign
 * (the surrounding if/else lines are elided in this extract).
 */
332 eval_arsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
337 /* check if shift value is less than max result bits */
338 if (rs->u.max >= opsz) {
339 eval_max_bound(rd, msk);
343 rd->u.max = (int64_t)rd->u.max >> rs->u.min;
344 rd->u.min = (int64_t)rd->u.min >> rs->u.max;
346 /* if we have 32-bit values - extend them to 64-bit */
347 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
355 rd->s.min = (rd->s.min >> (rs->u.min + shv)) & msk;
357 rd->s.min = (rd->s.min >> (rs->u.max + shv)) & msk;
360 rd->s.max = (rd->s.max >> (rs->u.max + shv)) & msk;
362 rd->s.max = (rd->s.max >> (rs->u.min + shv)) & msk;
/* smallest all-ones mask that covers v: 2^(bit-length of v) - 1 */
366 eval_umax_bits(uint64_t v, size_t opsz)
371 v = __builtin_clzll(v);
372 return RTE_LEN2MASK(opsz - v, uint64_t);
375 /* estimate max possible value for (v1 & v2) */
377 eval_uand_max(uint64_t v1, uint64_t v2, size_t opsz)
379 v1 = eval_umax_bits(v1, opsz);
380 v2 = eval_umax_bits(v2, opsz);
384 /* estimate max possible value for (v1 | v2) */
386 eval_uor_max(uint64_t v1, uint64_t v2, size_t opsz)
388 v1 = eval_umax_bits(v1, opsz);
389 v2 = eval_umax_bits(v2, opsz);
/*
 * Evaluate bounds for AND: rd &= rs.
 * Constants fold exactly; otherwise the max is estimated from the
 * bit-lengths of the operand maxima, and the signed range is only
 * tracked when at least one operand is known non-negative.
 */
394 eval_and(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
397 /* both operands are constants */
398 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
399 rd->u.min &= rs->u.min;
400 rd->u.max &= rs->u.max;
402 rd->u.max = eval_uand_max(rd->u.max, rs->u.max, opsz);
403 rd->u.min &= rs->u.min;
406 /* both operands are constants */
407 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
408 rd->s.min &= rs->s.min;
409 rd->s.max &= rs->s.max;
410 /* at least one of operand is non-negative */
411 } else if (rd->s.min >= 0 || rs->s.min >= 0) {
/* msk >> 1 strips the sign bit before the bit-length estimate */
412 rd->s.max = eval_uand_max(rd->s.max & (msk >> 1),
413 rs->s.max & (msk >> 1), opsz);
414 rd->s.min &= rs->s.min;
416 eval_smax_bound(rd, msk);
/*
 * Evaluate bounds for OR: rd |= rs.
 * Constants fold exactly; otherwise max is estimated from operand
 * bit-lengths; the signed range is widened unless both operands are
 * known non-negative.
 */
420 eval_or(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
423 /* both operands are constants */
424 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
425 rd->u.min |= rs->u.min;
426 rd->u.max |= rs->u.max;
428 rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
429 rd->u.min |= rs->u.min;
432 /* both operands are constants */
433 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
434 rd->s.min |= rs->s.min;
435 rd->s.max |= rs->s.max;
437 /* both operands are non-negative */
438 } else if (rd->s.min >= 0 || rs->s.min >= 0) {
439 rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
440 rd->s.min |= rs->s.min;
442 eval_smax_bound(rd, msk);
/*
 * Evaluate bounds for XOR: rd ^= rs.
 * Constants fold exactly; otherwise the OR upper-bound estimate is
 * reused (XOR result never exceeds the OR of the same operands).
 */
446 eval_xor(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
449 /* both operands are constants */
450 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
451 rd->u.min ^= rs->u.min;
452 rd->u.max ^= rs->u.max;
454 rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
458 /* both operands are constants */
459 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
460 rd->s.min ^= rs->s.min;
461 rd->s.max ^= rs->s.max;
463 /* both operands are non-negative */
464 } else if (rd->s.min >= 0 || rs->s.min >= 0) {
465 rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
468 eval_smax_bound(rd, msk);
472 eval_mul(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
475 /* both operands are constants */
476 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
477 rd->u.min = (rd->u.min * rs->u.min) & msk;
478 rd->u.max = (rd->u.max * rs->u.max) & msk;
479 /* check for overflow */
480 } else if (rd->u.max <= msk >> opsz / 2 && rs->u.max <= msk >> opsz) {
481 rd->u.max *= rs->u.max;
482 rd->u.min *= rd->u.min;
484 eval_umax_bound(rd, msk);
486 /* both operands are constants */
487 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
488 rd->s.min = (rd->s.min * rs->s.min) & msk;
489 rd->s.max = (rd->s.max * rs->s.max) & msk;
490 /* check that both operands are positive and no overflow */
491 } else if (rd->s.min >= 0 && rs->s.min >= 0) {
492 rd->s.max *= rs->s.max;
493 rd->s.min *= rd->s.min;
495 eval_smax_bound(rd, msk);
499 eval_divmod(uint32_t op, struct bpf_reg_val *rd, struct bpf_reg_val *rs,
500 size_t opsz, uint64_t msk)
502 /* both operands are constants */
503 if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
505 return "division by 0";
507 rd->u.min /= rs->u.min;
508 rd->u.max /= rs->u.max;
510 rd->u.min %= rs->u.min;
511 rd->u.max %= rs->u.max;
515 rd->u.max = RTE_MIN(rd->u.max, rs->u.max - 1);
517 rd->u.max = rd->u.max;
521 /* if we have 32-bit values - extend them to 64-bit */
522 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
523 rd->s.min = (int32_t)rd->s.min;
524 rd->s.max = (int32_t)rd->s.max;
525 rs->s.min = (int32_t)rs->s.min;
526 rs->s.max = (int32_t)rs->s.max;
529 /* both operands are constants */
530 if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
532 return "division by 0";
534 rd->s.min /= rs->s.min;
535 rd->s.max /= rs->s.max;
537 rd->s.min %= rs->s.min;
538 rd->s.max %= rs->s.max;
540 } else if (op == BPF_MOD) {
541 rd->s.min = RTE_MAX(rd->s.max, 0);
542 rd->s.min = RTE_MIN(rd->s.min, 0);
544 eval_smax_bound(rd, msk);
/*
 * Evaluate bounds for NEG: rd = -rd, operand width opsz bits.
 * Both bounds are negated under the mask and then re-ordered, since
 * negation swaps which endpoint is the larger one.
 */
553 eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
558 /* if we have 32-bit values - extend them to 64-bit */
559 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
560 rd->u.min = (int32_t)rd->u.min;
561 rd->u.max = (int32_t)rd->u.max;
564 ux = -(int64_t)rd->u.min & msk;
565 uy = -(int64_t)rd->u.max & msk;
567 rd->u.max = RTE_MAX(ux, uy);
568 rd->u.min = RTE_MIN(ux, uy);
570 /* if we have 32-bit values - extend them to 64-bit */
571 if (opsz == sizeof(uint32_t) * CHAR_BIT) {
572 rd->s.min = (int32_t)rd->s.min;
573 rd->s.max = (int32_t)rd->s.max;
576 sx = -rd->s.min & msk;
577 sy = -rd->s.max & msk;
579 rd->s.max = RTE_MAX(sx, sy);
580 rd->s.min = RTE_MIN(sx, sy);
584 * check that destination and source operand are in defined state.
/* NULL pointer means "don't check that operand"; returns error string or NULL */
587 eval_defined(const struct bpf_reg_val *dst, const struct bpf_reg_val *src)
589 if (dst != NULL && dst->v.type == RTE_BPF_ARG_UNDEF)
590 return "dest reg value is undefined";
591 if (src != NULL && src->v.type == RTE_BPF_ARG_UNDEF)
592 return "src reg value is undefined";
/*
 * Evaluate one ALU/ALU64 instruction: pick operand width from the class,
 * materialise the source operand (register or immediate), check both
 * operands are defined, then dispatch to the per-op bounds evaluator.
 * MOV skips the dest-defined check; NEG skips the src-defined check.
 */
597 eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
603 struct bpf_eval_state *st;
604 struct bpf_reg_val *rd, rs;
606 opsz = (BPF_CLASS(ins->code) == BPF_ALU) ?
607 sizeof(uint32_t) : sizeof(uint64_t);
608 opsz = opsz * CHAR_BIT;
609 msk = RTE_LEN2MASK(opsz, uint64_t);
612 rd = st->rv + ins->dst_reg;
614 if (BPF_SRC(ins->code) == BPF_X) {
/* rs is a local copy, so masking it doesn't clobber the source register */
615 rs = st->rv[ins->src_reg];
616 eval_apply_mask(&rs, msk);
618 eval_fill_imm(&rs, msk, ins->imm);
620 eval_apply_mask(rd, msk);
622 op = BPF_OP(ins->code);
624 err = eval_defined((op != EBPF_MOV) ? rd : NULL,
625 (op != BPF_NEG) ? &rs : NULL);
630 eval_add(rd, &rs, msk);
631 else if (op == BPF_SUB)
632 eval_sub(rd, &rs, msk);
633 else if (op == BPF_LSH)
634 eval_lsh(rd, &rs, opsz, msk);
635 else if (op == BPF_RSH)
636 eval_rsh(rd, &rs, opsz, msk);
637 else if (op == EBPF_ARSH)
638 eval_arsh(rd, &rs, opsz, msk);
639 else if (op == BPF_AND)
640 eval_and(rd, &rs, opsz, msk);
641 else if (op == BPF_OR)
642 eval_or(rd, &rs, opsz, msk);
643 else if (op == BPF_XOR)
644 eval_xor(rd, &rs, opsz, msk);
645 else if (op == BPF_MUL)
646 eval_mul(rd, &rs, opsz, msk);
647 else if (op == BPF_DIV || op == BPF_MOD)
648 err = eval_divmod(op, rd, &rs, opsz, msk);
649 else if (op == BPF_NEG)
650 eval_neg(rd, opsz, msk);
651 else if (op == EBPF_MOV)
/* fall-through default: unknown result, widest bounds */
654 eval_max_bound(rd, msk);
/*
 * Evaluate byte-swap (TO_BE/TO_LE) of imm bits.
 * On a little-endian host TO_BE actually swaps bytes (bounds become
 * unpredictable -> widest range), while TO_LE just truncates; the
 * big-endian branch mirrors this with the roles reversed.
 */
660 eval_bele(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
663 struct bpf_eval_state *st;
664 struct bpf_reg_val *rd;
667 msk = RTE_LEN2MASK(ins->imm, uint64_t);
670 rd = st->rv + ins->dst_reg;
672 err = eval_defined(rd, NULL);
676 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
677 if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_BE))
678 eval_max_bound(rd, msk);
680 eval_apply_mask(rd, msk);
682 if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_LE))
683 eval_max_bound(rd, msk);
685 eval_apply_mask(rd, msk);
/*
 * Validate a pointer access of 'opsz' bytes at rm + off with the given
 * alignment: rm must be a pointer type, untruncated, within the object
 * bounds and aligned. Stack and mbuf accesses must additionally use an
 * exactly-known (constant) offset; stack accesses also grow the
 * verifier's view of the required stack size.
 * Returns NULL on success or an error string.
 */
692 eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,
693 uint32_t align, int16_t off)
695 struct bpf_reg_val rv;
697 /* calculate reg + offset */
698 eval_fill_imm(&rv, rm->mask, off)
699 eval_add(rm, &rv, rm->mask);
701 if (RTE_BPF_ARG_PTR_TYPE(rm->v.type) == 0)
702 return "destination is not a pointer";
704 if (rm->mask != UINT64_MAX)
705 return "pointer truncation";
707 if (rm->u.max + opsz > rm->v.size ||
708 (uint64_t)rm->s.max + opsz > rm->v.size ||
710 return "memory boundary violation";
712 if (rm->u.max % align !=  0)
713 return "unaligned memory access";
715 if (rm->v.type == BPF_ARG_PTR_STACK) {
717 if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
718 rm->u.max != (uint64_t)rm->s.max)
719 return "stack access with variable offset";
/* stack grows down from v.size, so required size = v.size - offset */
721 bvf->stack_sz = RTE_MAX(bvf->stack_sz, rm->v.size - rm->u.max);
723 /* pointer to mbuf */
724 } else if (rm->v.type == RTE_BPF_ARG_PTR_MBUF) {
726 if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
727 rm->u.max != (uint64_t)rm->s.max)
728 return "mbuf access with variable offset";
/*
 * Widest possible result of a load of 'mask' width: full unsigned range;
 * a full 64-bit load may be negative, while any narrower load is
 * zero-extended and therefore always non-negative.
 */
735 eval_max_load(struct bpf_reg_val *rv, uint64_t mask)
737 eval_umax_bound(rv, mask);
739 /* full 64-bit load */
740 if (mask == UINT64_MAX)
741 eval_smax_bound(rv, mask);
743 /* zero-extend load */
744 rv->s.min = rv->u.min;
745 rv->s.max = rv->u.max;
/*
 * Evaluate LDX: rd = *(size *)(rs + off).
 * After pointer validation the result type depends on the source:
 *  - stack: the slot must have been initialised with at least this width;
 *  - mbuf: loads of well-known fields get precise values/types
 *    (next -> opaque, buf_addr -> pointer into the buffer,
 *     data_off -> headroom constant), anything else is a raw scalar;
 *  - other pointer: raw scalar with widest bounds for the load width.
 */
750 eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
755 struct bpf_eval_state *st;
756 struct bpf_reg_val *rd, rs;
757 const struct bpf_reg_val *sv;
760 rd = st->rv + ins->dst_reg;
761 rs = st->rv[ins->src_reg];
762 opsz = bpf_size(BPF_SIZE(ins->code));
763 msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
765 err = eval_ptr(bvf, &rs, opsz, 1, ins->off);
769 if (rs.v.type == BPF_ARG_PTR_STACK) {
771 sv = st->sv + rs.u.max / sizeof(uint64_t);
772 if (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)
773 return "undefined value on the stack";
777 /* pointer to mbuf */
778 } else if (rs.v.type == RTE_BPF_ARG_PTR_MBUF) {
780 if (rs.u.max == offsetof(struct rte_mbuf, next)) {
781 eval_fill_imm(rd, msk, 0);
783 } else if (rs.u.max == offsetof(struct rte_mbuf, buf_addr)) {
784 eval_fill_imm(rd, msk, 0);
785 rd->v.type = RTE_BPF_ARG_PTR;
786 rd->v.size = rs.v.buf_size;
787 } else if (rs.u.max == offsetof(struct rte_mbuf, data_off)) {
788 eval_fill_imm(rd, msk, RTE_PKTMBUF_HEADROOM);
789 rd->v.type = RTE_BPF_ARG_RAW;
791 eval_max_load(rd, msk);
792 rd->v.type = RTE_BPF_ARG_RAW;
795 /* pointer to raw data */
797 eval_max_load(rd, msk);
798 rd->v.type = RTE_BPF_ARG_RAW;
/*
 * Reject stores that overlap any read-only rte_mbuf field.
 * The loop skips entries whose [off, off+sz) range does not intersect
 * [rv->u.max, rv->u.max+opsz); exiting before the end means an overlap.
 * NOTE(review): "fileds" is a typo for "fields" in the identifier -
 * left as-is since renaming is out of scope for a comment-only pass.
 */
805 eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)
809 static const struct {
812 } mbuf_ro_fileds[] = {
813 { .off = offsetof(struct rte_mbuf, buf_addr), },
814 { .off = offsetof(struct rte_mbuf, refcnt), },
815 { .off = offsetof(struct rte_mbuf, nb_segs), },
816 { .off = offsetof(struct rte_mbuf, buf_len), },
817 { .off = offsetof(struct rte_mbuf, pool), },
818 { .off = offsetof(struct rte_mbuf, next), },
819 { .off = offsetof(struct rte_mbuf, priv_size), },
822 for (i = 0; i != RTE_DIM(mbuf_ro_fileds) &&
823 (mbuf_ro_fileds[i].off + mbuf_ro_fileds[i].sz <=
824 rv->u.max || rv->u.max + opsz <= mbuf_ro_fileds[i].off);
828 if (i != RTE_DIM(mbuf_ro_fileds))
829 return "store to the read-only mbuf field";
/*
 * Evaluate ST/STX: *(size *)(rd + off) = rs/imm.
 * The stored value must be defined and the destination a valid pointer.
 * A store to the stack records the stored value's bounds in the matching
 * stack slot (XADD results are unpredictable -> widest bounds); a store
 * to an mbuf must not touch read-only fields.
 */
836 eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
841 struct bpf_eval_state *st;
842 struct bpf_reg_val rd, rs, *sv;
844 opsz = bpf_size(BPF_SIZE(ins->code));
845 msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
848 rd = st->rv[ins->dst_reg];
850 if (BPF_CLASS(ins->code) == BPF_STX) {
851 rs = st->rv[ins->src_reg];
852 eval_apply_mask(&rs, msk);
854 eval_fill_imm(&rs, msk, ins->imm);
856 err = eval_defined(NULL, &rs);
860 err = eval_ptr(bvf, &rd, opsz, 1, ins->off);
864 if (rd.v.type == BPF_ARG_PTR_STACK) {
866 sv = st->sv + rd.u.max / sizeof(uint64_t);
867 if (BPF_CLASS(ins->code) == BPF_STX &&
868 BPF_MODE(ins->code) == EBPF_XADD)
869 eval_max_bound(sv, msk);
873 /* pointer to mbuf */
874 } else if (rd.v.type == RTE_BPF_ARG_PTR_MBUF) {
875 err = eval_mbuf_store(&rd, opsz);
/*
 * Validate one argument for an external function call against the
 * symbol's declared argument descriptor: the value must be defined and
 * of a compatible type; pointer arguments are additionally validated
 * as a readable region of the declared size. Passing a stack pointer
 * marks the spanned stack slots as initialised (the callee may write
 * them), with widest bounds.
 */
884 eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
885 struct bpf_reg_val *rv)
888 struct bpf_eval_state *st;
893 if (rv->v.type == RTE_BPF_ARG_UNDEF)
894 return "Undefined argument type";
896 if (arg->type != rv->v.type &&
897 arg->type != RTE_BPF_ARG_RAW &&
898 (arg->type != RTE_BPF_ARG_PTR ||
899 RTE_BPF_ARG_PTR_TYPE(rv->v.type) == 0))
900 return "Invalid argument type";
904 /* argument is a pointer */
905 if (RTE_BPF_ARG_PTR_TYPE(arg->type) != 0) {
907 err = eval_ptr(bvf, rv, arg->size, 1, 0);
910 * pointer to the variable on the stack is passed
911 * as an argument, mark stack space it occupies as initialized.
913 if (err == NULL && rv->v.type == BPF_ARG_PTR_STACK) {
915 i = rv->u.max / sizeof(uint64_t);
916 n = i + arg->size / sizeof(uint64_t);
918 eval_fill_max_bound(st->sv + i, UINT64_MAX);
/*
 * Evaluate CALL to an external symbol (index in imm):
 * validate the index and each declared argument, then model the call's
 * effect on registers - R1-R5 become undefined (scratch), R0 takes the
 * symbol's declared return descriptor (widest raw bounds for a scalar,
 * non-NULL pointer value for a pointer return).
 */
928 eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
931 struct bpf_reg_val *rv;
932 const struct rte_bpf_xsym *xsym;
937 if (idx >= bvf->prm->nb_xsym ||
938 bvf->prm->xsym[idx].type != RTE_BPF_XTYPE_FUNC)
939 return "invalid external function index";
941 /* for now don't support function calls on 32 bit platform */
942 if (sizeof(uint64_t) != sizeof(uintptr_t))
943 return "function calls are supported only for 64 bit apps";
945 xsym = bvf->prm->xsym + idx;
947 /* evaluate function arguments */
949 for (i = 0; i != xsym->func.nb_args && err == NULL; i++) {
950 err = eval_func_arg(bvf, xsym->func.args + i,
951 bvf->evst->rv + EBPF_REG_1 + i);
954 /* R1-R5 argument/scratch registers */
955 for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
956 bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
958 /* update return value */
960 rv = bvf->evst->rv + EBPF_REG_0;
961 rv->v = xsym->func.ret;
962 if (rv->v.type == RTE_BPF_ARG_RAW)
963 eval_fill_max_bound(rv,
964 RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
965 else if (RTE_BPF_ARG_PTR_TYPE(rv->v.type) != 0)
966 eval_fill_imm64(rv, UINTPTR_MAX, 0);
/*
 * Narrow bounds on the branch where dreg == sreg is known true
 * (JEQ taken / JNE fall-through): if either side is an exact constant
 * the other side becomes that constant (assignment lines elided in this
 * extract); otherwise both ranges are intersected.
 */
972 eval_jeq_jne(struct bpf_reg_val *trd, struct bpf_reg_val *trs)
974 /* sreg is constant */
975 if (trs->u.min == trs->u.max) {
977 /* dreg is constant */
978 } else if (trd->u.min == trd->u.max) {
981 trd->u.max = RTE_MIN(trd->u.max, trs->u.max);
982 trd->u.min = RTE_MAX(trd->u.min, trs->u.min);
986 /* sreg is constant */
987 if (trs->s.min == trs->s.max) {
989 /* dreg is constant */
990 } else if (trd->s.min == trd->s.max) {
993 trd->s.max = RTE_MIN(trd->s.max, trs->s.max);
994 trd->s.min = RTE_MAX(trd->s.min, trs->s.min);
/*
 * Unsigned JGT: on the taken branch dreg > sreg (so dreg min grows past
 * sreg min); on the fall-through branch dreg <= sreg (so dreg max shrinks
 * to sreg min). JLE reuses this with taken/fall-through operands swapped.
 */
1000 eval_jgt_jle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1001 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1003 frd->u.max = RTE_MIN(frd->u.max, frs->u.min);
1004 trd->u.min = RTE_MAX(trd->u.min, trs->u.min + 1);
/*
 * Unsigned JLT: taken branch has dreg < sreg (dreg max shrinks below
 * sreg max); fall-through has dreg >= sreg (dreg min grows to sreg min).
 * JGE reuses this with taken/fall-through operands swapped.
 */
1008 eval_jlt_jge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1009 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1011 frd->u.min = RTE_MAX(frd->u.min, frs->u.min);
1012 trd->u.max = RTE_MIN(trd->u.max, trs->u.max - 1);
/* signed counterpart of eval_jgt_jle(): JSGT taken / JSLE fall-through */
1016 eval_jsgt_jsle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1017 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1019 frd->s.max = RTE_MIN(frd->s.max, frs->s.min);
1020 trd->s.min = RTE_MAX(trd->s.min, trs->s.min + 1);
/* signed counterpart of eval_jlt_jge(): JSLT taken / JSGE fall-through */
1024 eval_jslt_jsge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
1025 struct bpf_reg_val *frd, struct bpf_reg_val *frs)
1027 frd->s.min = RTE_MAX(frd->s.min, frs->s.min);
1028 trd->s.max = RTE_MIN(trd->s.max, trs->s.max - 1);
1032 eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
1036 struct bpf_eval_state *fst, *tst;
1037 struct bpf_reg_val *frd, *frs, *trd, *trs;
1038 struct bpf_reg_val rvf, rvt;
1041 fst = bvf->evin->evst;
1043 frd = fst->rv + ins->dst_reg;
1044 trd = tst->rv + ins->dst_reg;
1046 if (BPF_SRC(ins->code) == BPF_X) {
1047 frs = fst->rv + ins->src_reg;
1048 trs = tst->rv + ins->src_reg;
1052 eval_fill_imm(frs, UINT64_MAX, ins->imm);
1053 eval_fill_imm(trs, UINT64_MAX, ins->imm);
1056 err = eval_defined(trd, trs);
1060 op = BPF_OP(ins->code);
1063 eval_jeq_jne(trd, trs);
1064 else if (op == EBPF_JNE)
1065 eval_jeq_jne(frd, frs);
1066 else if (op == BPF_JGT)
1067 eval_jgt_jle(trd, trs, frd, frs);
1068 else if (op == EBPF_JLE)
1069 eval_jgt_jle(frd, frs, trd, trs);
1070 else if (op == EBPF_JLT)
1071 eval_jlt_jge(trd, trs, frd, frs);
1072 else if (op == BPF_JGE)
1073 eval_jlt_jge(frd, frs, trd, trs);
1074 else if (op == EBPF_JSGT)
1075 eval_jsgt_jsle(trd, trs, frd, frs);
1076 else if (op == EBPF_JSLE)
1077 eval_jsgt_jsle(frd, frs, trd, trs);
1078 else if (op == EBPF_JLT)
1079 eval_jslt_jsge(trd, trs, frd, frs);
1080 else if (op == EBPF_JSGE)
1081 eval_jslt_jsge(frd, frs, trd, trs);
1087 * validate parameters for each instruction type.
1089 static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
1090 /* ALU IMM 32-bit instructions */
1091 [(BPF_ALU | BPF_ADD | BPF_K)] = {
1092 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1093 .off = { .min = 0, .max = 0},
1094 .imm = { .min = 0, .max = UINT32_MAX,},
1097 [(BPF_ALU | BPF_SUB | BPF_K)] = {
1098 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1099 .off = { .min = 0, .max = 0},
1100 .imm = { .min = 0, .max = UINT32_MAX,},
1103 [(BPF_ALU | BPF_AND | BPF_K)] = {
1104 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1105 .off = { .min = 0, .max = 0},
1106 .imm = { .min = 0, .max = UINT32_MAX,},
1109 [(BPF_ALU | BPF_OR | BPF_K)] = {
1110 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1111 .off = { .min = 0, .max = 0},
1112 .imm = { .min = 0, .max = UINT32_MAX,},
1115 [(BPF_ALU | BPF_LSH | BPF_K)] = {
1116 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1117 .off = { .min = 0, .max = 0},
1118 .imm = { .min = 0, .max = UINT32_MAX,},
1121 [(BPF_ALU | BPF_RSH | BPF_K)] = {
1122 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1123 .off = { .min = 0, .max = 0},
1124 .imm = { .min = 0, .max = UINT32_MAX,},
1127 [(BPF_ALU | BPF_XOR | BPF_K)] = {
1128 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1129 .off = { .min = 0, .max = 0},
1130 .imm = { .min = 0, .max = UINT32_MAX,},
1133 [(BPF_ALU | BPF_MUL | BPF_K)] = {
1134 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1135 .off = { .min = 0, .max = 0},
1136 .imm = { .min = 0, .max = UINT32_MAX,},
1139 [(BPF_ALU | EBPF_MOV | BPF_K)] = {
1140 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1141 .off = { .min = 0, .max = 0},
1142 .imm = { .min = 0, .max = UINT32_MAX,},
1145 [(BPF_ALU | BPF_DIV | BPF_K)] = {
1146 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1147 .off = { .min = 0, .max = 0},
1148 .imm = { .min = 1, .max = UINT32_MAX},
1151 [(BPF_ALU | BPF_MOD | BPF_K)] = {
1152 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1153 .off = { .min = 0, .max = 0},
1154 .imm = { .min = 1, .max = UINT32_MAX},
1157 /* ALU IMM 64-bit instructions */
1158 [(EBPF_ALU64 | BPF_ADD | BPF_K)] = {
1159 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1160 .off = { .min = 0, .max = 0},
1161 .imm = { .min = 0, .max = UINT32_MAX,},
1164 [(EBPF_ALU64 | BPF_SUB | BPF_K)] = {
1165 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1166 .off = { .min = 0, .max = 0},
1167 .imm = { .min = 0, .max = UINT32_MAX,},
1170 [(EBPF_ALU64 | BPF_AND | BPF_K)] = {
1171 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1172 .off = { .min = 0, .max = 0},
1173 .imm = { .min = 0, .max = UINT32_MAX,},
1176 [(EBPF_ALU64 | BPF_OR | BPF_K)] = {
1177 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1178 .off = { .min = 0, .max = 0},
1179 .imm = { .min = 0, .max = UINT32_MAX,},
1182 [(EBPF_ALU64 | BPF_LSH | BPF_K)] = {
1183 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1184 .off = { .min = 0, .max = 0},
1185 .imm = { .min = 0, .max = UINT32_MAX,},
1188 [(EBPF_ALU64 | BPF_RSH | BPF_K)] = {
1189 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1190 .off = { .min = 0, .max = 0},
1191 .imm = { .min = 0, .max = UINT32_MAX,},
1194 [(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {
1195 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1196 .off = { .min = 0, .max = 0},
1197 .imm = { .min = 0, .max = UINT32_MAX,},
1200 [(EBPF_ALU64 | BPF_XOR | BPF_K)] = {
1201 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1202 .off = { .min = 0, .max = 0},
1203 .imm = { .min = 0, .max = UINT32_MAX,},
1206 [(EBPF_ALU64 | BPF_MUL | BPF_K)] = {
1207 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1208 .off = { .min = 0, .max = 0},
1209 .imm = { .min = 0, .max = UINT32_MAX,},
1212 [(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {
1213 .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
1214 .off = { .min = 0, .max = 0},
1215 .imm = { .min = 0, .max = UINT32_MAX,},
1218 [(EBPF_ALU64 | BPF_DIV | BPF_K)] = {
1219 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1220 .off = { .min = 0, .max = 0},
1221 .imm = { .min = 1, .max = UINT32_MAX},
1224 [(EBPF_ALU64 | BPF_MOD | BPF_K)] = {
1225 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1226 .off = { .min = 0, .max = 0},
1227 .imm = { .min = 1, .max = UINT32_MAX},
1230 /* ALU REG 32-bit instructions */
1231 [(BPF_ALU | BPF_ADD | BPF_X)] = {
1232 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1233 .off = { .min = 0, .max = 0},
1234 .imm = { .min = 0, .max = 0},
1237 [(BPF_ALU | BPF_SUB | BPF_X)] = {
1238 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1239 .off = { .min = 0, .max = 0},
1240 .imm = { .min = 0, .max = 0},
1243 [(BPF_ALU | BPF_AND | BPF_X)] = {
1244 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1245 .off = { .min = 0, .max = 0},
1246 .imm = { .min = 0, .max = 0},
1249 [(BPF_ALU | BPF_OR | BPF_X)] = {
1250 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1251 .off = { .min = 0, .max = 0},
1252 .imm = { .min = 0, .max = 0},
1255 [(BPF_ALU | BPF_LSH | BPF_X)] = {
1256 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1257 .off = { .min = 0, .max = 0},
1258 .imm = { .min = 0, .max = 0},
1261 [(BPF_ALU | BPF_RSH | BPF_X)] = {
1262 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1263 .off = { .min = 0, .max = 0},
1264 .imm = { .min = 0, .max = 0},
1267 [(BPF_ALU | BPF_XOR | BPF_X)] = {
1268 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1269 .off = { .min = 0, .max = 0},
1270 .imm = { .min = 0, .max = 0},
1273 [(BPF_ALU | BPF_MUL | BPF_X)] = {
1274 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1275 .off = { .min = 0, .max = 0},
1276 .imm = { .min = 0, .max = 0},
1279 [(BPF_ALU | BPF_DIV | BPF_X)] = {
1280 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1281 .off = { .min = 0, .max = 0},
1282 .imm = { .min = 0, .max = 0},
1285 [(BPF_ALU | BPF_MOD | BPF_X)] = {
1286 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1287 .off = { .min = 0, .max = 0},
1288 .imm = { .min = 0, .max = 0},
1291 [(BPF_ALU | EBPF_MOV | BPF_X)] = {
1292 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1293 .off = { .min = 0, .max = 0},
1294 .imm = { .min = 0, .max = 0},
1297 [(BPF_ALU | BPF_NEG)] = {
1298 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1299 .off = { .min = 0, .max = 0},
1300 .imm = { .min = 0, .max = 0},
1303 [(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {
1304 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1305 .off = { .min = 0, .max = 0},
1306 .imm = { .min = 16, .max = 64},
1307 .check = check_alu_bele,
1310 [(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {
1311 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1312 .off = { .min = 0, .max = 0},
1313 .imm = { .min = 16, .max = 64},
1314 .check = check_alu_bele,
1317 /* ALU REG 64-bit instructions */
1318 [(EBPF_ALU64 | BPF_ADD | BPF_X)] = {
1319 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1320 .off = { .min = 0, .max = 0},
1321 .imm = { .min = 0, .max = 0},
1324 [(EBPF_ALU64 | BPF_SUB | BPF_X)] = {
1325 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1326 .off = { .min = 0, .max = 0},
1327 .imm = { .min = 0, .max = 0},
1330 [(EBPF_ALU64 | BPF_AND | BPF_X)] = {
1331 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1332 .off = { .min = 0, .max = 0},
1333 .imm = { .min = 0, .max = 0},
1336 [(EBPF_ALU64 | BPF_OR | BPF_X)] = {
1337 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1338 .off = { .min = 0, .max = 0},
1339 .imm = { .min = 0, .max = 0},
1342 [(EBPF_ALU64 | BPF_LSH | BPF_X)] = {
1343 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1344 .off = { .min = 0, .max = 0},
1345 .imm = { .min = 0, .max = 0},
1348 [(EBPF_ALU64 | BPF_RSH | BPF_X)] = {
1349 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1350 .off = { .min = 0, .max = 0},
1351 .imm = { .min = 0, .max = 0},
1354 [(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {
1355 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1356 .off = { .min = 0, .max = 0},
1357 .imm = { .min = 0, .max = 0},
1360 [(EBPF_ALU64 | BPF_XOR | BPF_X)] = {
1361 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1362 .off = { .min = 0, .max = 0},
1363 .imm = { .min = 0, .max = 0},
1366 [(EBPF_ALU64 | BPF_MUL | BPF_X)] = {
1367 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1368 .off = { .min = 0, .max = 0},
1369 .imm = { .min = 0, .max = 0},
1372 [(EBPF_ALU64 | BPF_DIV | BPF_X)] = {
1373 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1374 .off = { .min = 0, .max = 0},
1375 .imm = { .min = 0, .max = 0},
1378 [(EBPF_ALU64 | BPF_MOD | BPF_X)] = {
1379 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1380 .off = { .min = 0, .max = 0},
1381 .imm = { .min = 0, .max = 0},
1384 [(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {
1385 .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
1386 .off = { .min = 0, .max = 0},
1387 .imm = { .min = 0, .max = 0},
1390 [(EBPF_ALU64 | BPF_NEG)] = {
1391 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1392 .off = { .min = 0, .max = 0},
1393 .imm = { .min = 0, .max = 0},
1396 /* load instructions */
1397 [(BPF_LDX | BPF_MEM | BPF_B)] = {
1398 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1399 .off = { .min = 0, .max = UINT16_MAX},
1400 .imm = { .min = 0, .max = 0},
1403 [(BPF_LDX | BPF_MEM | BPF_H)] = {
1404 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1405 .off = { .min = 0, .max = UINT16_MAX},
1406 .imm = { .min = 0, .max = 0},
1409 [(BPF_LDX | BPF_MEM | BPF_W)] = {
1410 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1411 .off = { .min = 0, .max = UINT16_MAX},
1412 .imm = { .min = 0, .max = 0},
1415 [(BPF_LDX | BPF_MEM | EBPF_DW)] = {
1416 .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
1417 .off = { .min = 0, .max = UINT16_MAX},
1418 .imm = { .min = 0, .max = 0},
1421 /* load 64 bit immediate value */
1422 [(BPF_LD | BPF_IMM | EBPF_DW)] = {
1423 .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
1424 .off = { .min = 0, .max = 0},
1425 .imm = { .min = 0, .max = UINT32_MAX},
1426 .eval = eval_ld_imm64,
1428 /* store REG instructions */
1429 [(BPF_STX | BPF_MEM | BPF_B)] = {
1430 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1431 .off = { .min = 0, .max = UINT16_MAX},
1432 .imm = { .min = 0, .max = 0},
1435 [(BPF_STX | BPF_MEM | BPF_H)] = {
1436 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1437 .off = { .min = 0, .max = UINT16_MAX},
1438 .imm = { .min = 0, .max = 0},
1441 [(BPF_STX | BPF_MEM | BPF_W)] = {
1442 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1443 .off = { .min = 0, .max = UINT16_MAX},
1444 .imm = { .min = 0, .max = 0},
1447 [(BPF_STX | BPF_MEM | EBPF_DW)] = {
1448 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1449 .off = { .min = 0, .max = UINT16_MAX},
1450 .imm = { .min = 0, .max = 0},
1453 /* atomic add instructions */
1454 [(BPF_STX | EBPF_XADD | BPF_W)] = {
1455 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1456 .off = { .min = 0, .max = UINT16_MAX},
1457 .imm = { .min = 0, .max = 0},
1460 [(BPF_STX | EBPF_XADD | EBPF_DW)] = {
1461 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1462 .off = { .min = 0, .max = UINT16_MAX},
1463 .imm = { .min = 0, .max = 0},
1466 /* store IMM instructions */
1467 [(BPF_ST | BPF_MEM | BPF_B)] = {
1468 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1469 .off = { .min = 0, .max = UINT16_MAX},
1470 .imm = { .min = 0, .max = UINT32_MAX},
1473 [(BPF_ST | BPF_MEM | BPF_H)] = {
1474 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1475 .off = { .min = 0, .max = UINT16_MAX},
1476 .imm = { .min = 0, .max = UINT32_MAX},
1479 [(BPF_ST | BPF_MEM | BPF_W)] = {
1480 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1481 .off = { .min = 0, .max = UINT16_MAX},
1482 .imm = { .min = 0, .max = UINT32_MAX},
1485 [(BPF_ST | BPF_MEM | EBPF_DW)] = {
1486 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1487 .off = { .min = 0, .max = UINT16_MAX},
1488 .imm = { .min = 0, .max = UINT32_MAX},
1491 /* jump instruction */
1492 [(BPF_JMP | BPF_JA)] = {
1493 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1494 .off = { .min = 0, .max = UINT16_MAX},
1495 .imm = { .min = 0, .max = 0},
1497 /* jcc IMM instructions */
1498 [(BPF_JMP | BPF_JEQ | BPF_K)] = {
1499 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1500 .off = { .min = 0, .max = UINT16_MAX},
1501 .imm = { .min = 0, .max = UINT32_MAX},
1504 [(BPF_JMP | EBPF_JNE | BPF_K)] = {
1505 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1506 .off = { .min = 0, .max = UINT16_MAX},
1507 .imm = { .min = 0, .max = UINT32_MAX},
1510 [(BPF_JMP | BPF_JGT | BPF_K)] = {
1511 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1512 .off = { .min = 0, .max = UINT16_MAX},
1513 .imm = { .min = 0, .max = UINT32_MAX},
1516 [(BPF_JMP | EBPF_JLT | BPF_K)] = {
1517 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1518 .off = { .min = 0, .max = UINT16_MAX},
1519 .imm = { .min = 0, .max = UINT32_MAX},
1522 [(BPF_JMP | BPF_JGE | BPF_K)] = {
1523 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1524 .off = { .min = 0, .max = UINT16_MAX},
1525 .imm = { .min = 0, .max = UINT32_MAX},
1528 [(BPF_JMP | EBPF_JLE | BPF_K)] = {
1529 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1530 .off = { .min = 0, .max = UINT16_MAX},
1531 .imm = { .min = 0, .max = UINT32_MAX},
1534 [(BPF_JMP | EBPF_JSGT | BPF_K)] = {
1535 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1536 .off = { .min = 0, .max = UINT16_MAX},
1537 .imm = { .min = 0, .max = UINT32_MAX},
1540 [(BPF_JMP | EBPF_JSLT | BPF_K)] = {
1541 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1542 .off = { .min = 0, .max = UINT16_MAX},
1543 .imm = { .min = 0, .max = UINT32_MAX},
1546 [(BPF_JMP | EBPF_JSGE | BPF_K)] = {
1547 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1548 .off = { .min = 0, .max = UINT16_MAX},
1549 .imm = { .min = 0, .max = UINT32_MAX},
1552 [(BPF_JMP | EBPF_JSLE | BPF_K)] = {
1553 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1554 .off = { .min = 0, .max = UINT16_MAX},
1555 .imm = { .min = 0, .max = UINT32_MAX},
1558 [(BPF_JMP | BPF_JSET | BPF_K)] = {
1559 .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
1560 .off = { .min = 0, .max = UINT16_MAX},
1561 .imm = { .min = 0, .max = UINT32_MAX},
1564 /* jcc REG instructions */
1565 [(BPF_JMP | BPF_JEQ | BPF_X)] = {
1566 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1567 .off = { .min = 0, .max = UINT16_MAX},
1568 .imm = { .min = 0, .max = 0},
1571 [(BPF_JMP | EBPF_JNE | BPF_X)] = {
1572 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1573 .off = { .min = 0, .max = UINT16_MAX},
1574 .imm = { .min = 0, .max = 0},
1577 [(BPF_JMP | BPF_JGT | BPF_X)] = {
1578 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1579 .off = { .min = 0, .max = UINT16_MAX},
1580 .imm = { .min = 0, .max = 0},
1583 [(BPF_JMP | EBPF_JLT | BPF_X)] = {
1584 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1585 .off = { .min = 0, .max = UINT16_MAX},
1586 .imm = { .min = 0, .max = 0},
1589 [(BPF_JMP | BPF_JGE | BPF_X)] = {
1590 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1591 .off = { .min = 0, .max = UINT16_MAX},
1592 .imm = { .min = 0, .max = 0},
1595 [(BPF_JMP | EBPF_JLE | BPF_X)] = {
1596 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1597 .off = { .min = 0, .max = UINT16_MAX},
1598 .imm = { .min = 0, .max = 0},
1601 [(BPF_JMP | EBPF_JSGT | BPF_X)] = {
1602 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1603 .off = { .min = 0, .max = UINT16_MAX},
1604 .imm = { .min = 0, .max = 0},
1607 [(BPF_JMP | EBPF_JSLT | BPF_X)] = {
1608 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1609 .off = { .min = 0, .max = UINT16_MAX},
1610 .imm = { .min = 0, .max = 0},
1612 [(BPF_JMP | EBPF_JSGE | BPF_X)] = {
1613 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1614 .off = { .min = 0, .max = UINT16_MAX},
1615 .imm = { .min = 0, .max = 0},
1618 [(BPF_JMP | EBPF_JSLE | BPF_X)] = {
1619 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1620 .off = { .min = 0, .max = UINT16_MAX},
1621 .imm = { .min = 0, .max = 0},
1624 [(BPF_JMP | BPF_JSET | BPF_X)] = {
1625 .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
1626 .off = { .min = 0, .max = UINT16_MAX},
1627 .imm = { .min = 0, .max = 0},
1630 /* call instruction */
1631 [(BPF_JMP | EBPF_CALL)] = {
1632 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1633 .off = { .min = 0, .max = 0},
1634 .imm = { .min = 0, .max = UINT32_MAX},
1637 /* ret instruction */
1638 [(BPF_JMP | EBPF_EXIT)] = {
1639 .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
1640 .off = { .min = 0, .max = 0},
1641 .imm = { .min = 0, .max = 0},
1647 * make sure that instruction syntax is valid,
1648 * and its fields don't violate particular instruction type restrictions.
/*
 * Table-driven check: ins_chk[] is indexed by opcode and holds the
 * permitted dst/src register masks plus [min, max] bounds for the
 * off and imm fields. Returns NULL on success, or a static string
 * naming the first violated constraint.
 */
1651 check_syntax(const struct ebpf_insn *ins)
/* a zeroed dreg mask marks an opcode with no table entry, i.e. unsupported */
1660 if (ins_chk[op].mask.dreg == 0)
1661 return "invalid opcode";
/* dst/src registers must be within the per-opcode allowed sets */
1663 if ((ins_chk[op].mask.dreg & 1 << ins->dst_reg) == 0)
1664 return "invalid dst-reg field";
1666 if ((ins_chk[op].mask.sreg & 1 << ins->src_reg) == 0)
1667 return "invalid src-reg field";
/* off and imm must lie inside the per-opcode [min, max] ranges */
1670 if (ins_chk[op].off.min > off || ins_chk[op].off.max < off)
1671 return "invalid off field";
1674 if (ins_chk[op].imm.min > imm || ins_chk[op].imm.max < imm)
1675 return "invalid imm field";
/* run the opcode-specific extra check, if any (e.g. check_alu_bele) */
1677 if (ins_chk[op].check != NULL)
1678 return ins_chk[op].check(ins);
1684 * helper function, return instruction index for the given node.
/* node index == instruction index: bvf->in is a per-instruction array */
1687 get_node_idx(const struct bpf_verifier *bvf, const struct inst_node *node)
1689 return node - bvf->in;
1693 * helper function, used to walk through constructed CFG.
/*
 * Return the destination node of the current node's next outgoing edge
 * (tracked by node->cur_edge); NULL is presumably returned once all
 * edges are consumed -- the bounds check is on a line not shown here.
 */
1695 static struct inst_node *
1696 get_next_node(struct bpf_verifier *bvf, struct inst_node *node)
1698 uint32_t ce, ne, dst;
1701 ce = node->cur_edge;
/* follow edge 'ce' to its destination instruction node */
1706 dst = node->edge_dest[ce];
1707 return bvf->in + dst;
/* recolour a node and keep the per-colour node counters in sync */
1711 set_node_colour(struct bpf_verifier *bvf, struct inst_node *node,
1716 prev = node->colour;
/* move the count from the old colour bucket to the new one */
1719 bvf->node_colour[prev]--;
1720 bvf->node_colour[new]++;
1724 * helper function, add new edge between two nodes.
/*
 * Record an outgoing CFG edge from 'node' to instruction index 'nidx'.
 * Rejects destinations beyond the program boundary and nodes whose
 * edge_dest[] array (MAX_EDGES entries) is already full.
 */
1727 add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
/* NOTE(review): '>' lets nidx == nb_ins (one past the last instruction)
 * through -- confirm an edge to nb_ins is intentionally permitted here. */
1731 if (nidx > bvf->prm->nb_ins) {
1732 RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, "
1734 __func__, get_node_idx(bvf, node), nidx);
/* all edge slots used up: RTE_DIM(edge_dest) == MAX_EDGES */
1739 if (ne >= RTE_DIM(node->edge_dest)) {
1740 RTE_BPF_LOG(ERR, "%s: internal error at pc: %u\n",
1741 __func__, get_node_idx(bvf, node));
1745 node->edge_dest[ne] = nidx;
1746 node->nb_edge = ne + 1;
1751 * helper function, determine type of edge between two nodes.
/*
 * Classify the edge that was just traversed by the DFS colour of its
 * destination: a GREY destination indicates a back edge (loop), a
 * BLACK one a cross/forward edge (the branch bodies assigning 'type'
 * are on lines not shown here). Per-type counters are updated.
 */
1754 set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
1755 const struct inst_node *next)
1757 uint32_t ce, clr, type;
/* cur_edge was already advanced, so the edge being classified is ce - 1 */
1759 ce = node->cur_edge - 1;
1762 type = UNKNOWN_EDGE;
1766 else if (clr == GREY)
1768 else if (clr == BLACK)
1770 * in fact it could be either direct or cross edge,
1771 * but for now, we don't need to distinguish between them.
1775 node->edge_type[ce] = type;
1776 bvf->edge_type[type]++;
/* return the DFS parent recorded in node->prev_node */
1779 static struct inst_node *
1780 get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
1782 return bvf->in + node->prev_node;
1786 * Depth-First Search (DFS) through previously constructed
1787 * Control Flow Graph (CFG).
1788 * Information collected at this path would be used later
1789 * to determine whether there are any loops and/or unreachable instructions.
1792 dfs(struct bpf_verifier *bvf)
1794 struct inst_node *next, *node;
/* iterative DFS: walk forward along edges, backtrack via prev_node links */
1797 while (node != NULL) {
/* first visit: mark the node as in-progress */
1799 if (node->colour == WHITE)
1800 set_node_colour(bvf, node, GREY);
1802 if (node->colour == GREY) {
1804 /* find next unprocessed child node */
1806 next = get_next_node(bvf, node);
/* classify every traversed edge, including those to visited nodes */
1809 set_edge_type(bvf, node, next);
1810 } while (next->colour != WHITE);
1813 /* proceed with next child */
/* remember the parent so backtracking needs no explicit stack */
1814 next->prev_node = get_node_idx(bvf, node);
1818 * finished with current node and all its kids,
1819 * proceed with parent
1821 set_node_colour(bvf, node, BLACK);
1823 node = get_prev_node(bvf, node);
1831 * report unreachable instructions.
/*
 * After DFS, any node still WHITE was never reached. Instructions whose
 * code is (BPF_LD | BPF_IMM | EBPF_DW) are excluded from reporting --
 * presumably to tolerate the two-slot lddw encoding (validate() jumps
 * from slot i to i + 2); TODO confirm against the full encoding rules.
 */
1834 log_unreachable(const struct bpf_verifier *bvf)
1837 struct inst_node *node;
1838 const struct ebpf_insn *ins;
1840 for (i = 0; i != bvf->prm->nb_ins; i++) {
1843 ins = bvf->prm->ins + i;
1845 if (node->colour == WHITE &&
1846 ins->code != (BPF_LD | BPF_IMM | EBPF_DW))
1847 RTE_BPF_LOG(ERR, "unreachable code at pc: %u;\n", i);
1852 * report loops detected.
/* report every BACK_EDGE recorded by the DFS as a loop */
1855 log_loop(const struct bpf_verifier *bvf)
1858 struct inst_node *node;
1860 for (i = 0; i != bvf->prm->nb_ins; i++) {
/* only fully-processed (BLACK) nodes carry meaningful edge types */
1863 if (node->colour != BLACK)
1866 for (j = 0; j != node->nb_edge; j++) {
1867 if (node->edge_type[j] == BACK_EDGE)
1869 "loop at pc:%u --> pc:%u;\n",
1870 i, node->edge_dest[j]);
1876 * First pass goes through all instructions in the set, checks that each
1877 * instruction is a valid one (correct syntax, valid field values, etc.)
1878 * and constructs control flow graph (CFG).
1879 * Then depth-first search is performed over the constructed graph.
1880 * Programs with unreachable instructions and/or loops will be rejected.
1883 validate(struct bpf_verifier *bvf)
1887 struct inst_node *node;
1888 const struct ebpf_insn *ins;
1892 for (i = 0; i < bvf->prm->nb_ins; i++) {
1894 ins = bvf->prm->ins + i;
/* per-instruction syntax check against the ins_chk[] table */
1897 err = check_syntax(ins);
1899 RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
1905 * construct CFG, jcc nodes have two outgoing edges,
1906 * 'exit' nodes - none, all other nodes have exactly one
1909 switch (ins->code) {
1910 case (BPF_JMP | EBPF_EXIT):
1912 case (BPF_JMP | BPF_JEQ | BPF_K):
1913 case (BPF_JMP | EBPF_JNE | BPF_K):
1914 case (BPF_JMP | BPF_JGT | BPF_K):
1915 case (BPF_JMP | EBPF_JLT | BPF_K):
1916 case (BPF_JMP | BPF_JGE | BPF_K):
1917 case (BPF_JMP | EBPF_JLE | BPF_K):
1918 case (BPF_JMP | EBPF_JSGT | BPF_K):
1919 case (BPF_JMP | EBPF_JSLT | BPF_K):
1920 case (BPF_JMP | EBPF_JSGE | BPF_K):
1921 case (BPF_JMP | EBPF_JSLE | BPF_K):
1922 case (BPF_JMP | BPF_JSET | BPF_K):
1923 case (BPF_JMP | BPF_JEQ | BPF_X):
1924 case (BPF_JMP | EBPF_JNE | BPF_X):
1925 case (BPF_JMP | BPF_JGT | BPF_X):
1926 case (BPF_JMP | EBPF_JLT | BPF_X):
1927 case (BPF_JMP | BPF_JGE | BPF_X):
1928 case (BPF_JMP | EBPF_JLE | BPF_X):
1929 case (BPF_JMP | EBPF_JSGT | BPF_X):
1930 case (BPF_JMP | EBPF_JSLT | BPF_X):
1931 case (BPF_JMP | EBPF_JSGE | BPF_X):
1932 case (BPF_JMP | EBPF_JSLE | BPF_X):
1933 case (BPF_JMP | BPF_JSET | BPF_X):
/* conditional jump: taken branch plus fall-through branch */
1934 rc |= add_edge(bvf, node, i + ins->off + 1);
1935 rc |= add_edge(bvf, node, i + 1);
1936 bvf->nb_jcc_nodes++;
1938 case (BPF_JMP | BPF_JA):
/* unconditional jump: single edge to the target */
1939 rc |= add_edge(bvf, node, i + ins->off + 1);
1941 /* load 64 bit immediate value */
1942 case (BPF_LD | BPF_IMM | EBPF_DW):
/* lddw occupies two instruction slots, so skip over the second one */
1943 rc |= add_edge(bvf, node, i + 2);
1947 rc |= add_edge(bvf, node, i + 1);
1952 bvf->node_colour[WHITE]++;
1960 RTE_BPF_LOG(DEBUG, "%s(%p) stats:\n"
1962 "nb_jcc_nodes=%u;\n"
/* NOTE(review): stray double comma after [GREY]=%u in this format string */
1963 "node_color={[WHITE]=%u, [GREY]=%u,, [BLACK]=%u};\n"
1964 "edge_type={[UNKNOWN]=%u, [TREE]=%u, [BACK]=%u, [CROSS]=%u};\n",
1968 bvf->node_colour[WHITE], bvf->node_colour[GREY],
1969 bvf->node_colour[BLACK],
1970 bvf->edge_type[UNKNOWN_EDGE], bvf->edge_type[TREE_EDGE],
1971 bvf->edge_type[BACK_EDGE], bvf->edge_type[CROSS_EDGE]);
/* every node must be BLACK after the DFS, otherwise code is unreachable */
1973 if (bvf->node_colour[BLACK] != bvf->nb_nodes) {
1974 RTE_BPF_LOG(ERR, "%s(%p) unreachable instructions;\n",
1976 log_unreachable(bvf);
/* leftover GREY/WHITE nodes or UNKNOWN edges mean the DFS itself failed */
1980 if (bvf->node_colour[GREY] != 0 || bvf->node_colour[WHITE] != 0 ||
1981 bvf->edge_type[UNKNOWN_EDGE] != 0) {
1982 RTE_BPF_LOG(ERR, "%s(%p) DFS internal error;\n",
/* any back edge implies a loop, which this verifier rejects */
1987 if (bvf->edge_type[BACK_EDGE] != 0) {
1988 RTE_BPF_LOG(ERR, "%s(%p) loops detected;\n",
1998 * helper functions get/free eval states.
/* pop the next free eval state from the pre-allocated pool */
2000 static struct bpf_eval_state *
2001 pull_eval_state(struct bpf_verifier *bvf)
2005 n = bvf->evst_pool.cur;
/* pool exhausted: cur has reached num */
2006 if (n == bvf->evst_pool.num)
2009 bvf->evst_pool.cur = n + 1;
2010 return bvf->evst_pool.ent + n;
/* return the most recently pulled eval state back to the pool */
2014 push_eval_state(struct bpf_verifier *bvf)
2016 bvf->evst_pool.cur--;
/* release the eval-state pool and reset its bookkeeping fields */
2020 evst_pool_fini(struct bpf_verifier *bvf)
2023 free(bvf->evst_pool.ent);
2024 memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool));
/*
 * Allocate the eval-state pool: one state per jcc node (each conditional
 * branch may need its state saved) plus one for the current working state.
 */
2028 evst_pool_init(struct bpf_verifier *bvf)
2032 n = bvf->nb_jcc_nodes + 1;
2034 bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0]));
2035 if (bvf->evst_pool.ent == NULL)
2038 bvf->evst_pool.num = n;
2039 bvf->evst_pool.cur = 0;
/* grab the first entry as the verifier's current state */
2041 bvf->evst = pull_eval_state(bvf);
2046 * Save current eval state.
/*
 * Attach the current eval state to 'node' and switch the verifier to a
 * fresh copy, so the saved state can be restored when evaluation later
 * comes back to take this node's other branch.
 */
2049 save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2051 struct bpf_eval_state *st;
2053 /* get new eval_state for this node */
2054 st = pull_eval_state(bvf);
2057 "%s: internal error (out of space) at pc: %u\n",
2058 __func__, get_node_idx(bvf, node));
2062 /* make a copy of current state */
2063 memcpy(st, bvf->evst, sizeof(*st));
2065 /* swap current state with new one */
2066 node->evst = bvf->evst;
2069 RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
2070 __func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);
2076 * Restore previous eval state and mark current eval state as free.
2079 restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
2081 RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
2082 __func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);
/* switch back to the state saved at this node ... */
2084 bvf->evst = node->evst;
/* ... and hand the no-longer-needed copy back to the pool */
2086 push_eval_state(bvf);
/* dump the tracked value/range info of the instruction's dst register */
2090 log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
2091 uint32_t pc, int32_t loglvl)
2093 const struct bpf_eval_state *st;
2094 const struct bpf_reg_val *rv;
2096 rte_log(loglvl, rte_bpf_logtype, "%s(pc=%u):\n", __func__, pc);
/* pick the register value tracked for this instruction's dst_reg */
2099 rv = st->rv + ins->dst_reg;
2101 rte_log(loglvl, rte_bpf_logtype,
2103 "\tv={type=%u, size=%zu},\n"
2104 "\tmask=0x%" PRIx64 ",\n"
2105 "\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n"
2106 "\ts={min=%" PRId64 ", max=%" PRId64 "},\n"
2109 rv->v.type, rv->v.size,
2111 rv->u.min, rv->u.max,
2112 rv->s.min, rv->s.max);
2116 * Do second pass through CFG and try to evaluate instructions
2117 * via each possible path.
2118 * Right now evaluation functionality is quite limited.
2119 * Still need to add extra checks for:
2120 * - use/return uninitialized registers.
2121 * - use uninitialized data from the stack.
2122 * - memory boundaries violation.
2125 evaluate(struct bpf_verifier *bvf)
2130 const struct ebpf_insn *ins;
2131 struct inst_node *next, *node;
2133 /* initial state of frame pointer */
2134 static const struct bpf_reg_val rvfp = {
2136 .type = BPF_ARG_PTR_STACK,
2137 .size = MAX_BPF_STACK_SIZE,
/* R10's value is pinned: both bounds equal MAX_BPF_STACK_SIZE */
2140 .u = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
2141 .s = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
/* R1 holds the program's input argument as described by prm->prog_arg */
2144 bvf->evst->rv[EBPF_REG_1].v = bvf->prm->prog_arg;
2145 bvf->evst->rv[EBPF_REG_1].mask = UINT64_MAX;
2146 if (bvf->prm->prog_arg.type == RTE_BPF_ARG_RAW)
2147 eval_max_bound(bvf->evst->rv + EBPF_REG_1, UINT64_MAX);
2149 bvf->evst->rv[EBPF_REG_10] = rvfp;
2151 ins = bvf->prm->ins;
/* DFS-style walk over the CFG, mirroring dfs() above */
2156 while (node != NULL && rc == 0) {
2159 * current node evaluation, make sure we evaluate
2160 * each node only once.
2165 idx = get_node_idx(bvf, node);
2168 /* for jcc node make a copy of evaluation state */
2169 if (node->nb_edge > 1)
2170 rc |= save_eval_state(bvf, node);
/* run the per-opcode eval hook from ins_chk[], if any */
2172 if (ins_chk[op].eval != NULL && rc == 0) {
2173 err = ins_chk[op].eval(bvf, ins + idx);
2175 RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
2176 __func__, err, idx);
2181 log_eval_state(bvf, ins + idx, idx, RTE_LOG_DEBUG);
2185 /* proceed through CFG */
2186 next = get_next_node(bvf, node);
2189 /* proceed with next child */
/* last edge of a jcc node taken: drop back to the state saved there */
2190 if (node->cur_edge == node->nb_edge &&
2192 restore_eval_state(bvf, node);
2194 next->prev_node = get_node_idx(bvf, node);
2198 * finished with current node and all its kids,
2199 * proceed with parent
2202 node = get_prev_node(bvf, node);
/* back at the root node: traversal complete */
2205 if (node == bvf->in)
2214 bpf_validate(struct rte_bpf *bpf)
2217 struct bpf_verifier bvf;
2219 /* check input argument type, don't allow mbuf ptr on 32-bit */
2220 if (bpf->prm.prog_arg.type != RTE_BPF_ARG_RAW &&
2221 bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR &&
2222 (sizeof(uint64_t) != sizeof(uintptr_t) ||
2223 bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR_MBUF)) {
2224 RTE_BPF_LOG(ERR, "%s: unsupported argument type\n", __func__);
2228 memset(&bvf, 0, sizeof(bvf));
2229 bvf.prm = &bpf->prm;
2230 bvf.in = calloc(bpf->prm.nb_ins, sizeof(bvf.in[0]));
2234 rc = validate(&bvf);
2237 rc = evst_pool_init(&bvf);
2239 rc = evaluate(&bvf);
2240 evst_pool_fini(&bvf);
2245 /* copy collected info */
2247 bpf->stack_sz = bvf.stack_sz;