1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
10 #include <rte_memory.h>
11 #include <rte_debug.h>
12 #include <rte_hexdump.h>
13 #include <rte_random.h>
14 #include <rte_byteorder.h>
15 #include <rte_errno.h>
17 #include <rte_ether.h>
23 * Basic functional tests for librte_bpf.
24 * The main procedure: load an eBPF program, execute it and
25 * compare results with expected values.
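 *
 * Each test case pairs an eBPF program with an optional prepare() callback
 * that fills the input buffer, and a check_result() callback that
 * recomputes the expected outcome in plain C. run_test() executes the
 * program via rte_bpf_exec() and, when a JIT image is available, once more
 * via jit.func().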
36 struct dummy_offset in[8];
37 struct dummy_offset out[8];
41 struct rte_ether_hdr eth_hdr;
42 struct rte_vlan_hdr vlan_hdr;
43 struct rte_ipv4_hdr ip_hdr;
46 #define DUMMY_MBUF_NUM 2
48 /* first mbuf in the packet, should always be at offset 0 */
50 struct rte_mbuf mb[DUMMY_MBUF_NUM];
51 uint8_t buf[DUMMY_MBUF_NUM][RTE_MBUF_DEFAULT_BUF_SIZE];
54 #define TEST_FILL_1 0xDEADBEEF
57 #define TEST_MUL_2 -100
59 #define TEST_SHIFT_1 15
60 #define TEST_SHIFT_2 33
62 #define TEST_SHIFT32_MASK (CHAR_BIT * sizeof(uint32_t) - 1)
63 #define TEST_SHIFT64_MASK (CHAR_BIT * sizeof(uint64_t) - 1)
66 #define TEST_JCC_2 -123
67 #define TEST_JCC_3 5678
68 #define TEST_JCC_4 TEST_FILL_1
70 #define TEST_IMM_1 UINT64_MAX
71 #define TEST_IMM_2 ((uint64_t)INT64_MIN)
72 #define TEST_IMM_3 ((uint64_t)INT64_MAX + INT32_MAX)
73 #define TEST_IMM_4 ((uint64_t)UINT32_MAX)
74 #define TEST_IMM_5 ((uint64_t)UINT32_MAX + 1)
76 #define TEST_MEMFROB 0x2a2a2a2a
78 #define STRING_GEEK 0x6B656567
79 #define STRING_WEEK 0x6B656577
81 #define TEST_NETMASK 0xffffff00
82 #define TEST_SUBNET 0xaca80200
84 uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
85 uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
87 uint32_t ip_src_addr = (172U << 24) | (168U << 16) | (2 << 8) | 1;
88 uint32_t ip_dst_addr = (172U << 24) | (168U << 16) | (2 << 8) | 2;
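
/* the addresses above are 172.168.2.1 and 172.168.2.2, both inside
 * TEST_SUBNET/TEST_NETMASK */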
93 struct rte_bpf_prm prm;
94 void (*prepare)(void *);
95 int (*check_result)(uint64_t, const void *);
100 * Compare return value and result data with expected ones.
101 * Report a failure if they don't match.
104 cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
105 const void *exp_res, const void *ret_res, size_t res_sz)
110 if (exp_rc != ret_rc) {
111 printf("%s@%d: invalid return value, expected: 0x%" PRIx64
112 ",result: 0x%" PRIx64 "\n",
113 func, __LINE__, exp_rc, ret_rc);
117 if (memcmp(exp_res, ret_res, res_sz) != 0) {
118 printf("%s: invalid value\n", func);
119 rte_memdump(stdout, "expected", exp_res, res_sz);
120 rte_memdump(stdout, "result", ret_res, res_sz);
127 /* store immediate test-cases */
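
/*
 * The program below stores an immediate (TEST_FILL_1, truncated to each
 * field width) into every member of the struct dummy_offset pointed to by
 * R1 and then returns 1; test_store1_check() rebuilds the same struct in C.
 */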
128 static const struct ebpf_insn test_store1_prog[] = {
130 .code = (BPF_ST | BPF_MEM | BPF_B),
131 .dst_reg = EBPF_REG_1,
132 .off = offsetof(struct dummy_offset, u8),
136 .code = (BPF_ST | BPF_MEM | BPF_H),
137 .dst_reg = EBPF_REG_1,
138 .off = offsetof(struct dummy_offset, u16),
142 .code = (BPF_ST | BPF_MEM | BPF_W),
143 .dst_reg = EBPF_REG_1,
144 .off = offsetof(struct dummy_offset, u32),
148 .code = (BPF_ST | BPF_MEM | EBPF_DW),
149 .dst_reg = EBPF_REG_1,
150 .off = offsetof(struct dummy_offset, u64),
155 .code = (BPF_ALU | EBPF_MOV | BPF_K),
156 .dst_reg = EBPF_REG_0,
160 .code = (BPF_JMP | EBPF_EXIT),
165 test_store1_prepare(void *arg)
167 struct dummy_offset *df;
170 memset(df, 0, sizeof(*df));
174 test_store1_check(uint64_t rc, const void *arg)
176 const struct dummy_offset *dft;
177 struct dummy_offset dfe;
181 memset(&dfe, 0, sizeof(dfe));
182 dfe.u64 = (int32_t)TEST_FILL_1;
187 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
190 /* store register test-cases */
191 static const struct ebpf_insn test_store2_prog[] = {
194 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
195 .dst_reg = EBPF_REG_2,
199 .code = (BPF_STX | BPF_MEM | BPF_B),
200 .dst_reg = EBPF_REG_1,
201 .src_reg = EBPF_REG_2,
202 .off = offsetof(struct dummy_offset, u8),
205 .code = (BPF_STX | BPF_MEM | BPF_H),
206 .dst_reg = EBPF_REG_1,
207 .src_reg = EBPF_REG_2,
208 .off = offsetof(struct dummy_offset, u16),
211 .code = (BPF_STX | BPF_MEM | BPF_W),
212 .dst_reg = EBPF_REG_1,
213 .src_reg = EBPF_REG_2,
214 .off = offsetof(struct dummy_offset, u32),
217 .code = (BPF_STX | BPF_MEM | EBPF_DW),
218 .dst_reg = EBPF_REG_1,
219 .src_reg = EBPF_REG_2,
220 .off = offsetof(struct dummy_offset, u64),
224 .code = (BPF_ALU | EBPF_MOV | BPF_K),
225 .dst_reg = EBPF_REG_0,
229 .code = (BPF_JMP | EBPF_EXIT),
233 /* load test-cases */
234 static const struct ebpf_insn test_load1_prog[] = {
237 .code = (BPF_LDX | BPF_MEM | BPF_B),
238 .dst_reg = EBPF_REG_2,
239 .src_reg = EBPF_REG_1,
240 .off = offsetof(struct dummy_offset, u8),
243 .code = (BPF_LDX | BPF_MEM | BPF_H),
244 .dst_reg = EBPF_REG_3,
245 .src_reg = EBPF_REG_1,
246 .off = offsetof(struct dummy_offset, u16),
249 .code = (BPF_LDX | BPF_MEM | BPF_W),
250 .dst_reg = EBPF_REG_4,
251 .src_reg = EBPF_REG_1,
252 .off = offsetof(struct dummy_offset, u32),
255 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
256 .dst_reg = EBPF_REG_0,
257 .src_reg = EBPF_REG_1,
258 .off = offsetof(struct dummy_offset, u64),
262 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
263 .dst_reg = EBPF_REG_0,
264 .src_reg = EBPF_REG_4,
267 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
268 .dst_reg = EBPF_REG_0,
269 .src_reg = EBPF_REG_3,
272 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
273 .dst_reg = EBPF_REG_0,
274 .src_reg = EBPF_REG_2,
277 .code = (BPF_JMP | EBPF_EXIT),
282 test_load1_prepare(void *arg)
284 struct dummy_offset *df;
288 memset(df, 0, sizeof(*df));
289 df->u64 = (int32_t)TEST_FILL_1;
296 test_load1_check(uint64_t rc, const void *arg)
299 const struct dummy_offset *dft;
307 return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
310 /* load immediate test-cases */
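
/*
 * BPF_LD | BPF_IMM | EBPF_DW is the only two-slot eBPF instruction: the
 * first slot's .imm carries the low 32 bits of the 64-bit constant and the
 * following slot's .imm carries the high 32 bits.
 */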
311 static const struct ebpf_insn test_ldimm1_prog[] = {
314 .code = (BPF_LD | BPF_IMM | EBPF_DW),
315 .dst_reg = EBPF_REG_0,
316 .imm = (uint32_t)TEST_IMM_1,
319 .imm = TEST_IMM_1 >> 32,
322 .code = (BPF_LD | BPF_IMM | EBPF_DW),
323 .dst_reg = EBPF_REG_3,
324 .imm = (uint32_t)TEST_IMM_2,
327 .imm = TEST_IMM_2 >> 32,
330 .code = (BPF_LD | BPF_IMM | EBPF_DW),
331 .dst_reg = EBPF_REG_5,
332 .imm = (uint32_t)TEST_IMM_3,
335 .imm = TEST_IMM_3 >> 32,
338 .code = (BPF_LD | BPF_IMM | EBPF_DW),
339 .dst_reg = EBPF_REG_7,
340 .imm = (uint32_t)TEST_IMM_4,
343 .imm = TEST_IMM_4 >> 32,
346 .code = (BPF_LD | BPF_IMM | EBPF_DW),
347 .dst_reg = EBPF_REG_9,
348 .imm = (uint32_t)TEST_IMM_5,
351 .imm = TEST_IMM_5 >> 32,
355 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
356 .dst_reg = EBPF_REG_0,
357 .src_reg = EBPF_REG_3,
360 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
361 .dst_reg = EBPF_REG_0,
362 .src_reg = EBPF_REG_5,
365 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
366 .dst_reg = EBPF_REG_0,
367 .src_reg = EBPF_REG_7,
370 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
371 .dst_reg = EBPF_REG_0,
372 .src_reg = EBPF_REG_9,
375 .code = (BPF_JMP | EBPF_EXIT),
380 test_ldimm1_check(uint64_t rc, const void *arg)
394 return cmp_res(__func__, v1, rc, arg, arg, 0);
398 /* alu mul test-cases */
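
/*
 * Exercises 32-bit (BPF_ALU) and 64-bit (EBPF_ALU64) multiplication with
 * both immediate (BPF_K) and register (BPF_X) operands; 32-bit results are
 * truncated, which test_mul1_check() mirrors with (uint32_t) casts.
 */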
399 static const struct ebpf_insn test_mul1_prog[] = {
402 .code = (BPF_LDX | BPF_MEM | BPF_W),
403 .dst_reg = EBPF_REG_2,
404 .src_reg = EBPF_REG_1,
405 .off = offsetof(struct dummy_vect8, in[0].u32),
408 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
409 .dst_reg = EBPF_REG_3,
410 .src_reg = EBPF_REG_1,
411 .off = offsetof(struct dummy_vect8, in[1].u64),
414 .code = (BPF_LDX | BPF_MEM | BPF_W),
415 .dst_reg = EBPF_REG_4,
416 .src_reg = EBPF_REG_1,
417 .off = offsetof(struct dummy_vect8, in[2].u32),
420 .code = (BPF_ALU | BPF_MUL | BPF_K),
421 .dst_reg = EBPF_REG_2,
425 .code = (EBPF_ALU64 | BPF_MUL | BPF_K),
426 .dst_reg = EBPF_REG_3,
430 .code = (BPF_ALU | BPF_MUL | BPF_X),
431 .dst_reg = EBPF_REG_4,
432 .src_reg = EBPF_REG_2,
435 .code = (EBPF_ALU64 | BPF_MUL | BPF_X),
436 .dst_reg = EBPF_REG_4,
437 .src_reg = EBPF_REG_3,
440 .code = (BPF_STX | BPF_MEM | EBPF_DW),
441 .dst_reg = EBPF_REG_1,
442 .src_reg = EBPF_REG_2,
443 .off = offsetof(struct dummy_vect8, out[0].u64),
446 .code = (BPF_STX | BPF_MEM | EBPF_DW),
447 .dst_reg = EBPF_REG_1,
448 .src_reg = EBPF_REG_3,
449 .off = offsetof(struct dummy_vect8, out[1].u64),
452 .code = (BPF_STX | BPF_MEM | EBPF_DW),
453 .dst_reg = EBPF_REG_1,
454 .src_reg = EBPF_REG_4,
455 .off = offsetof(struct dummy_vect8, out[2].u64),
459 .code = (BPF_ALU | EBPF_MOV | BPF_K),
460 .dst_reg = EBPF_REG_0,
464 .code = (BPF_JMP | EBPF_EXIT),
469 test_mul1_prepare(void *arg)
471 struct dummy_vect8 *dv;
478 memset(dv, 0, sizeof(*dv));
480 dv->in[1].u64 = v << 12 | v >> 6;
485 test_mul1_check(uint64_t rc, const void *arg)
488 const struct dummy_vect8 *dvt;
489 struct dummy_vect8 dve;
492 memset(&dve, 0, sizeof(dve));
498 r2 = (uint32_t)r2 * TEST_MUL_1;
500 r4 = (uint32_t)(r4 * r2);
507 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
510 /* alu shift test-cases */
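
/*
 * Covers logical and arithmetic shifts in 32 and 64 bit with both
 * immediate and register counts; register counts are first ANDed with
 * TEST_SHIFT32_MASK/TEST_SHIFT64_MASK to keep them below the operand
 * width.
 */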
511 static const struct ebpf_insn test_shift1_prog[] = {
514 .code = (BPF_LDX | BPF_MEM | BPF_W),
515 .dst_reg = EBPF_REG_2,
516 .src_reg = EBPF_REG_1,
517 .off = offsetof(struct dummy_vect8, in[0].u32),
520 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
521 .dst_reg = EBPF_REG_3,
522 .src_reg = EBPF_REG_1,
523 .off = offsetof(struct dummy_vect8, in[1].u64),
526 .code = (BPF_LDX | BPF_MEM | BPF_W),
527 .dst_reg = EBPF_REG_4,
528 .src_reg = EBPF_REG_1,
529 .off = offsetof(struct dummy_vect8, in[2].u32),
532 .code = (BPF_ALU | BPF_LSH | BPF_K),
533 .dst_reg = EBPF_REG_2,
537 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
538 .dst_reg = EBPF_REG_3,
542 .code = (BPF_STX | BPF_MEM | EBPF_DW),
543 .dst_reg = EBPF_REG_1,
544 .src_reg = EBPF_REG_2,
545 .off = offsetof(struct dummy_vect8, out[0].u64),
548 .code = (BPF_STX | BPF_MEM | EBPF_DW),
549 .dst_reg = EBPF_REG_1,
550 .src_reg = EBPF_REG_3,
551 .off = offsetof(struct dummy_vect8, out[1].u64),
554 .code = (BPF_ALU | BPF_AND | BPF_K),
555 .dst_reg = EBPF_REG_4,
556 .imm = TEST_SHIFT64_MASK,
559 .code = (EBPF_ALU64 | BPF_LSH | BPF_X),
560 .dst_reg = EBPF_REG_3,
561 .src_reg = EBPF_REG_4,
564 .code = (BPF_ALU | BPF_AND | BPF_K),
565 .dst_reg = EBPF_REG_4,
566 .imm = TEST_SHIFT32_MASK,
569 .code = (BPF_ALU | BPF_RSH | BPF_X),
570 .dst_reg = EBPF_REG_2,
571 .src_reg = EBPF_REG_4,
574 .code = (BPF_STX | BPF_MEM | EBPF_DW),
575 .dst_reg = EBPF_REG_1,
576 .src_reg = EBPF_REG_2,
577 .off = offsetof(struct dummy_vect8, out[2].u64),
580 .code = (BPF_STX | BPF_MEM | EBPF_DW),
581 .dst_reg = EBPF_REG_1,
582 .src_reg = EBPF_REG_3,
583 .off = offsetof(struct dummy_vect8, out[3].u64),
586 .code = (BPF_LDX | BPF_MEM | BPF_W),
587 .dst_reg = EBPF_REG_2,
588 .src_reg = EBPF_REG_1,
589 .off = offsetof(struct dummy_vect8, in[0].u32),
592 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
593 .dst_reg = EBPF_REG_3,
594 .src_reg = EBPF_REG_1,
595 .off = offsetof(struct dummy_vect8, in[1].u64),
598 .code = (BPF_LDX | BPF_MEM | BPF_W),
599 .dst_reg = EBPF_REG_4,
600 .src_reg = EBPF_REG_1,
601 .off = offsetof(struct dummy_vect8, in[2].u32),
604 .code = (BPF_ALU | BPF_AND | BPF_K),
605 .dst_reg = EBPF_REG_2,
606 .imm = TEST_SHIFT64_MASK,
609 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
610 .dst_reg = EBPF_REG_3,
611 .src_reg = EBPF_REG_2,
614 .code = (BPF_ALU | BPF_AND | BPF_K),
615 .dst_reg = EBPF_REG_2,
616 .imm = TEST_SHIFT32_MASK,
619 .code = (BPF_ALU | BPF_LSH | BPF_X),
620 .dst_reg = EBPF_REG_4,
621 .src_reg = EBPF_REG_2,
624 .code = (BPF_STX | BPF_MEM | EBPF_DW),
625 .dst_reg = EBPF_REG_1,
626 .src_reg = EBPF_REG_4,
627 .off = offsetof(struct dummy_vect8, out[4].u64),
630 .code = (BPF_STX | BPF_MEM | EBPF_DW),
631 .dst_reg = EBPF_REG_1,
632 .src_reg = EBPF_REG_3,
633 .off = offsetof(struct dummy_vect8, out[5].u64),
637 .code = (BPF_ALU | EBPF_MOV | BPF_K),
638 .dst_reg = EBPF_REG_0,
642 .code = (BPF_JMP | EBPF_EXIT),
647 test_shift1_prepare(void *arg)
649 struct dummy_vect8 *dv;
656 memset(dv, 0, sizeof(*dv));
658 dv->in[1].u64 = v << 12 | v >> 6;
659 dv->in[2].u32 = (-v ^ 5);
663 test_shift1_check(uint64_t rc, const void *arg)
666 const struct dummy_vect8 *dvt;
667 struct dummy_vect8 dve;
670 memset(&dve, 0, sizeof(dve));
676 r2 = (uint32_t)r2 << TEST_SHIFT_1;
677 r3 = (int64_t)r3 >> TEST_SHIFT_2;
682 r4 &= TEST_SHIFT64_MASK;
684 r4 &= TEST_SHIFT32_MASK;
685 r2 = (uint32_t)r2 >> r4;
694 r2 &= TEST_SHIFT64_MASK;
695 r3 = (int64_t)r3 >> r2;
696 r2 &= TEST_SHIFT32_MASK;
697 r4 = (uint32_t)r4 << r2;
702 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
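
/*
 * jmp test-cases: each conditional branch below, when taken, targets a
 * block that ORs a distinct flag bit into R0, so the return value encodes
 * exactly which comparisons fired; test_jump1_check() recomputes the same
 * flag word in C.
 */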
706 static const struct ebpf_insn test_jump1_prog[] = {
709 .code = (BPF_ALU | EBPF_MOV | BPF_K),
710 .dst_reg = EBPF_REG_0,
714 .code = (BPF_LDX | BPF_MEM | BPF_W),
715 .dst_reg = EBPF_REG_2,
716 .src_reg = EBPF_REG_1,
717 .off = offsetof(struct dummy_vect8, in[0].u32),
720 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
721 .dst_reg = EBPF_REG_3,
722 .src_reg = EBPF_REG_1,
723 .off = offsetof(struct dummy_vect8, in[0].u64),
726 .code = (BPF_LDX | BPF_MEM | BPF_W),
727 .dst_reg = EBPF_REG_4,
728 .src_reg = EBPF_REG_1,
729 .off = offsetof(struct dummy_vect8, in[1].u32),
732 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
733 .dst_reg = EBPF_REG_5,
734 .src_reg = EBPF_REG_1,
735 .off = offsetof(struct dummy_vect8, in[1].u64),
738 .code = (BPF_JMP | BPF_JEQ | BPF_K),
739 .dst_reg = EBPF_REG_2,
744 .code = (BPF_JMP | EBPF_JSLE | BPF_K),
745 .dst_reg = EBPF_REG_3,
750 .code = (BPF_JMP | BPF_JGT | BPF_K),
751 .dst_reg = EBPF_REG_4,
756 .code = (BPF_JMP | BPF_JSET | BPF_K),
757 .dst_reg = EBPF_REG_5,
762 .code = (BPF_JMP | EBPF_JNE | BPF_X),
763 .dst_reg = EBPF_REG_2,
764 .src_reg = EBPF_REG_3,
768 .code = (BPF_JMP | EBPF_JSGT | BPF_X),
769 .dst_reg = EBPF_REG_2,
770 .src_reg = EBPF_REG_4,
774 .code = (BPF_JMP | EBPF_JLE | BPF_X),
775 .dst_reg = EBPF_REG_2,
776 .src_reg = EBPF_REG_5,
780 .code = (BPF_JMP | BPF_JSET | BPF_X),
781 .dst_reg = EBPF_REG_3,
782 .src_reg = EBPF_REG_5,
786 .code = (BPF_JMP | EBPF_EXIT),
789 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
790 .dst_reg = EBPF_REG_0,
794 .code = (BPF_JMP | BPF_JA),
798 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
799 .dst_reg = EBPF_REG_0,
803 .code = (BPF_JMP | BPF_JA),
807 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
808 .dst_reg = EBPF_REG_0,
812 .code = (BPF_JMP | BPF_JA),
816 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
817 .dst_reg = EBPF_REG_0,
821 .code = (BPF_JMP | BPF_JA),
825 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
826 .dst_reg = EBPF_REG_0,
830 .code = (BPF_JMP | BPF_JA),
834 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
835 .dst_reg = EBPF_REG_0,
839 .code = (BPF_JMP | BPF_JA),
843 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
844 .dst_reg = EBPF_REG_0,
848 .code = (BPF_JMP | BPF_JA),
852 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
853 .dst_reg = EBPF_REG_0,
857 .code = (BPF_JMP | BPF_JA),
863 test_jump1_prepare(void *arg)
865 struct dummy_vect8 *dv;
873 memset(dv, 0, sizeof(*dv));
876 dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
877 dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
881 test_jump1_check(uint64_t rc, const void *arg)
883 uint64_t r2, r3, r4, r5, rv;
884 const struct dummy_vect8 *dvt;
894 if (r2 == TEST_JCC_1)
896 if ((int64_t)r3 <= TEST_JCC_2)
904 if ((int64_t)r2 > (int64_t)r4)
911 return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
914 /* Jump test case - check that the IPv4 dest addr is in a particular subnet */
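
/*
 * The program walks the ether header (skipping one VLAN tag), converts
 * TEST_NETMASK and TEST_SUBNET to big-endian with EBPF_END | EBPF_TO_BE,
 * and compares the masked destination address against the subnet;
 * test_jump2_check() expects 0 on a match.
 */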
915 static const struct ebpf_insn test_jump2_prog[] = {
918 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
919 .dst_reg = EBPF_REG_2,
923 .code = (BPF_LDX | BPF_MEM | BPF_H),
924 .dst_reg = EBPF_REG_3,
925 .src_reg = EBPF_REG_1,
929 .code = (BPF_JMP | EBPF_JNE | BPF_K),
930 .dst_reg = EBPF_REG_3,
935 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
936 .dst_reg = EBPF_REG_2,
940 .code = (BPF_LDX | BPF_MEM | BPF_H),
941 .dst_reg = EBPF_REG_3,
942 .src_reg = EBPF_REG_1,
946 .code = (EBPF_ALU64 | BPF_AND | BPF_K),
947 .dst_reg = EBPF_REG_3,
951 .code = (BPF_JMP | EBPF_JNE | BPF_K),
952 .dst_reg = EBPF_REG_3,
957 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
958 .dst_reg = EBPF_REG_1,
959 .src_reg = EBPF_REG_2,
962 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
963 .dst_reg = EBPF_REG_0,
967 .code = (BPF_LDX | BPF_MEM | BPF_W),
968 .dst_reg = EBPF_REG_1,
969 .src_reg = EBPF_REG_1,
973 .code = (BPF_ALU | EBPF_MOV | BPF_K),
974 .dst_reg = EBPF_REG_3,
978 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
979 .dst_reg = EBPF_REG_3,
980 .imm = sizeof(uint32_t) * CHAR_BIT,
983 .code = (BPF_ALU | BPF_AND | BPF_X),
984 .dst_reg = EBPF_REG_1,
985 .src_reg = EBPF_REG_3,
988 .code = (BPF_ALU | EBPF_MOV | BPF_K),
989 .dst_reg = EBPF_REG_3,
993 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
994 .dst_reg = EBPF_REG_3,
995 .imm = sizeof(uint32_t) * CHAR_BIT,
998 .code = (BPF_JMP | BPF_JEQ | BPF_X),
999 .dst_reg = EBPF_REG_1,
1000 .src_reg = EBPF_REG_3,
1004 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1005 .dst_reg = EBPF_REG_0,
1009 .code = (BPF_JMP | EBPF_EXIT),
1013 /* Prepare a VLAN packet */
1015 test_jump2_prepare(void *arg)
1017 struct dummy_net *dn;
1020 memset(dn, 0, sizeof(*dn));
1023 * Initialize ether header.
1025 rte_ether_addr_copy((struct rte_ether_addr *)dst_mac,
1026 &dn->eth_hdr.dst_addr);
1027 rte_ether_addr_copy((struct rte_ether_addr *)src_mac,
1028 &dn->eth_hdr.src_addr);
1029 dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1032 * Initialize vlan header.
1034 dn->vlan_hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1035 dn->vlan_hdr.vlan_tci = 32;
1038 * Initialize IP header.
1040 dn->ip_hdr.version_ihl = 0x45; /* IP_VERSION | IP_HDRLEN */
1041 dn->ip_hdr.time_to_live = 64; /* IP_DEFTTL */
1042 dn->ip_hdr.next_proto_id = IPPROTO_TCP;
1043 dn->ip_hdr.packet_id = rte_cpu_to_be_16(0x463c);
1044 dn->ip_hdr.total_length = rte_cpu_to_be_16(60);
1045 dn->ip_hdr.src_addr = rte_cpu_to_be_32(ip_src_addr);
1046 dn->ip_hdr.dst_addr = rte_cpu_to_be_32(ip_dst_addr);
1050 test_jump2_check(uint64_t rc, const void *arg)
1052 const struct rte_ether_hdr *eth_hdr = arg;
1053 const struct rte_ipv4_hdr *ipv4_hdr;
1054 const void *next = eth_hdr;
1058 if (eth_hdr->ether_type == htons(0x8100)) {
1059 const struct rte_vlan_hdr *vlan_hdr =
1060 (const void *)(eth_hdr + 1);
1061 eth_type = vlan_hdr->eth_proto;
1062 next = vlan_hdr + 1;
1064 eth_type = eth_hdr->ether_type;
1068 if (eth_type == htons(0x0800)) {
1070 if ((ipv4_hdr->dst_addr & rte_cpu_to_be_32(TEST_NETMASK)) ==
1071 rte_cpu_to_be_32(TEST_SUBNET)) {
1076 return cmp_res(__func__, v, rc, arg, arg, sizeof(arg));
1079 /* alu (add, sub, and, or, xor, neg) test-cases */
1080 static const struct ebpf_insn test_alu1_prog[] = {
1083 .code = (BPF_LDX | BPF_MEM | BPF_W),
1084 .dst_reg = EBPF_REG_2,
1085 .src_reg = EBPF_REG_1,
1086 .off = offsetof(struct dummy_vect8, in[0].u32),
1089 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1090 .dst_reg = EBPF_REG_3,
1091 .src_reg = EBPF_REG_1,
1092 .off = offsetof(struct dummy_vect8, in[0].u64),
1095 .code = (BPF_LDX | BPF_MEM | BPF_W),
1096 .dst_reg = EBPF_REG_4,
1097 .src_reg = EBPF_REG_1,
1098 .off = offsetof(struct dummy_vect8, in[1].u32),
1101 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1102 .dst_reg = EBPF_REG_5,
1103 .src_reg = EBPF_REG_1,
1104 .off = offsetof(struct dummy_vect8, in[1].u64),
1107 .code = (BPF_ALU | BPF_AND | BPF_K),
1108 .dst_reg = EBPF_REG_2,
1112 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1113 .dst_reg = EBPF_REG_3,
1117 .code = (BPF_ALU | BPF_XOR | BPF_K),
1118 .dst_reg = EBPF_REG_4,
1122 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1123 .dst_reg = EBPF_REG_5,
1127 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1128 .dst_reg = EBPF_REG_1,
1129 .src_reg = EBPF_REG_2,
1130 .off = offsetof(struct dummy_vect8, out[0].u64),
1133 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1134 .dst_reg = EBPF_REG_1,
1135 .src_reg = EBPF_REG_3,
1136 .off = offsetof(struct dummy_vect8, out[1].u64),
1139 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1140 .dst_reg = EBPF_REG_1,
1141 .src_reg = EBPF_REG_4,
1142 .off = offsetof(struct dummy_vect8, out[2].u64),
1145 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1146 .dst_reg = EBPF_REG_1,
1147 .src_reg = EBPF_REG_5,
1148 .off = offsetof(struct dummy_vect8, out[3].u64),
1151 .code = (BPF_ALU | BPF_OR | BPF_X),
1152 .dst_reg = EBPF_REG_2,
1153 .src_reg = EBPF_REG_3,
1156 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
1157 .dst_reg = EBPF_REG_3,
1158 .src_reg = EBPF_REG_4,
1161 .code = (BPF_ALU | BPF_SUB | BPF_X),
1162 .dst_reg = EBPF_REG_4,
1163 .src_reg = EBPF_REG_5,
1166 .code = (EBPF_ALU64 | BPF_AND | BPF_X),
1167 .dst_reg = EBPF_REG_5,
1168 .src_reg = EBPF_REG_2,
1171 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1172 .dst_reg = EBPF_REG_1,
1173 .src_reg = EBPF_REG_2,
1174 .off = offsetof(struct dummy_vect8, out[4].u64),
1177 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1178 .dst_reg = EBPF_REG_1,
1179 .src_reg = EBPF_REG_3,
1180 .off = offsetof(struct dummy_vect8, out[5].u64),
1183 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1184 .dst_reg = EBPF_REG_1,
1185 .src_reg = EBPF_REG_4,
1186 .off = offsetof(struct dummy_vect8, out[6].u64),
1189 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1190 .dst_reg = EBPF_REG_1,
1191 .src_reg = EBPF_REG_5,
1192 .off = offsetof(struct dummy_vect8, out[7].u64),
1194 /* return (-r2 + (-r3)) */
1196 .code = (BPF_ALU | BPF_NEG),
1197 .dst_reg = EBPF_REG_2,
1200 .code = (EBPF_ALU64 | BPF_NEG),
1201 .dst_reg = EBPF_REG_3,
1204 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1205 .dst_reg = EBPF_REG_2,
1206 .src_reg = EBPF_REG_3,
1209 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1210 .dst_reg = EBPF_REG_0,
1211 .src_reg = EBPF_REG_2,
1214 .code = (BPF_JMP | EBPF_EXIT),
1219 test_alu1_check(uint64_t rc, const void *arg)
1221 uint64_t r2, r3, r4, r5, rv;
1222 const struct dummy_vect8 *dvt;
1223 struct dummy_vect8 dve;
1226 memset(&dve, 0, sizeof(dve));
1228 r2 = dvt->in[0].u32;
1229 r3 = dvt->in[0].u64;
1230 r4 = dvt->in[1].u32;
1231 r5 = dvt->in[1].u64;
1233 r2 = (uint32_t)r2 & TEST_FILL_1;
1234 r3 |= (int32_t) TEST_FILL_1;
1235 r4 = (uint32_t)r4 ^ TEST_FILL_1;
1236 r5 += (int32_t)TEST_FILL_1;
1238 dve.out[0].u64 = r2;
1239 dve.out[1].u64 = r3;
1240 dve.out[2].u64 = r4;
1241 dve.out[3].u64 = r5;
1243 r2 = (uint32_t)r2 | (uint32_t)r3;
1245 r4 = (uint32_t)r4 - (uint32_t)r5;
1248 dve.out[4].u64 = r2;
1249 dve.out[5].u64 = r3;
1250 dve.out[6].u64 = r4;
1251 dve.out[7].u64 = r5;
1258 return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
1261 /* endianness conversions (BE->LE/LE->BE) test-cases */
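
/*
 * For EBPF_END the .imm field encodes the operand width in bits (16/32/64);
 * EBPF_TO_BE/EBPF_TO_LE convert to the named byte order, mirrored in
 * test_bele1_check() by rte_cpu_to_be_*() and rte_cpu_to_le_*().
 */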
1262 static const struct ebpf_insn test_bele1_prog[] = {
1265 .code = (BPF_LDX | BPF_MEM | BPF_H),
1266 .dst_reg = EBPF_REG_2,
1267 .src_reg = EBPF_REG_1,
1268 .off = offsetof(struct dummy_vect8, in[0].u16),
1271 .code = (BPF_LDX | BPF_MEM | BPF_W),
1272 .dst_reg = EBPF_REG_3,
1273 .src_reg = EBPF_REG_1,
1274 .off = offsetof(struct dummy_vect8, in[0].u32),
1277 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1278 .dst_reg = EBPF_REG_4,
1279 .src_reg = EBPF_REG_1,
1280 .off = offsetof(struct dummy_vect8, in[0].u64),
1283 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1284 .dst_reg = EBPF_REG_2,
1285 .imm = sizeof(uint16_t) * CHAR_BIT,
1288 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1289 .dst_reg = EBPF_REG_3,
1290 .imm = sizeof(uint32_t) * CHAR_BIT,
1293 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1294 .dst_reg = EBPF_REG_4,
1295 .imm = sizeof(uint64_t) * CHAR_BIT,
1298 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1299 .dst_reg = EBPF_REG_1,
1300 .src_reg = EBPF_REG_2,
1301 .off = offsetof(struct dummy_vect8, out[0].u64),
1304 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1305 .dst_reg = EBPF_REG_1,
1306 .src_reg = EBPF_REG_3,
1307 .off = offsetof(struct dummy_vect8, out[1].u64),
1310 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1311 .dst_reg = EBPF_REG_1,
1312 .src_reg = EBPF_REG_4,
1313 .off = offsetof(struct dummy_vect8, out[2].u64),
1316 .code = (BPF_LDX | BPF_MEM | BPF_H),
1317 .dst_reg = EBPF_REG_2,
1318 .src_reg = EBPF_REG_1,
1319 .off = offsetof(struct dummy_vect8, in[0].u16),
1322 .code = (BPF_LDX | BPF_MEM | BPF_W),
1323 .dst_reg = EBPF_REG_3,
1324 .src_reg = EBPF_REG_1,
1325 .off = offsetof(struct dummy_vect8, in[0].u32),
1328 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1329 .dst_reg = EBPF_REG_4,
1330 .src_reg = EBPF_REG_1,
1331 .off = offsetof(struct dummy_vect8, in[0].u64),
1334 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1335 .dst_reg = EBPF_REG_2,
1336 .imm = sizeof(uint16_t) * CHAR_BIT,
1339 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1340 .dst_reg = EBPF_REG_3,
1341 .imm = sizeof(uint32_t) * CHAR_BIT,
1344 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1345 .dst_reg = EBPF_REG_4,
1346 .imm = sizeof(uint64_t) * CHAR_BIT,
1349 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1350 .dst_reg = EBPF_REG_1,
1351 .src_reg = EBPF_REG_2,
1352 .off = offsetof(struct dummy_vect8, out[3].u64),
1355 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1356 .dst_reg = EBPF_REG_1,
1357 .src_reg = EBPF_REG_3,
1358 .off = offsetof(struct dummy_vect8, out[4].u64),
1361 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1362 .dst_reg = EBPF_REG_1,
1363 .src_reg = EBPF_REG_4,
1364 .off = offsetof(struct dummy_vect8, out[5].u64),
1368 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1369 .dst_reg = EBPF_REG_0,
1373 .code = (BPF_JMP | EBPF_EXIT),
1378 test_bele1_prepare(void *arg)
1380 struct dummy_vect8 *dv;
1384 memset(dv, 0, sizeof(*dv));
1385 dv->in[0].u64 = rte_rand();
1386 dv->in[0].u32 = dv->in[0].u64;
1387 dv->in[0].u16 = dv->in[0].u64;
1391 test_bele1_check(uint64_t rc, const void *arg)
1393 uint64_t r2, r3, r4;
1394 const struct dummy_vect8 *dvt;
1395 struct dummy_vect8 dve;
1398 memset(&dve, 0, sizeof(dve));
1400 r2 = dvt->in[0].u16;
1401 r3 = dvt->in[0].u32;
1402 r4 = dvt->in[0].u64;
1404 r2 = rte_cpu_to_be_16(r2);
1405 r3 = rte_cpu_to_be_32(r3);
1406 r4 = rte_cpu_to_be_64(r4);
1408 dve.out[0].u64 = r2;
1409 dve.out[1].u64 = r3;
1410 dve.out[2].u64 = r4;
1412 r2 = dvt->in[0].u16;
1413 r3 = dvt->in[0].u32;
1414 r4 = dvt->in[0].u64;
1416 r2 = rte_cpu_to_le_16(r2);
1417 r3 = rte_cpu_to_le_32(r3);
1418 r4 = rte_cpu_to_le_64(r4);
1420 dve.out[3].u64 = r2;
1421 dve.out[4].u64 = r3;
1422 dve.out[5].u64 = r4;
1424 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
1427 /* atomic add test-cases */
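
/*
 * BPF_STX | EBPF_XADD atomically adds the source register to the 32-bit or
 * 64-bit word at (dst_reg + off); test_xadd1_check() replays the same
 * additions with rte_atomic32_add()/rte_atomic64_add().
 */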
1428 static const struct ebpf_insn test_xadd1_prog[] = {
1431 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1432 .dst_reg = EBPF_REG_2,
1436 .code = (BPF_STX | EBPF_XADD | BPF_W),
1437 .dst_reg = EBPF_REG_1,
1438 .src_reg = EBPF_REG_2,
1439 .off = offsetof(struct dummy_offset, u32),
1442 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1443 .dst_reg = EBPF_REG_1,
1444 .src_reg = EBPF_REG_2,
1445 .off = offsetof(struct dummy_offset, u64),
1448 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1449 .dst_reg = EBPF_REG_3,
1453 .code = (BPF_STX | EBPF_XADD | BPF_W),
1454 .dst_reg = EBPF_REG_1,
1455 .src_reg = EBPF_REG_3,
1456 .off = offsetof(struct dummy_offset, u32),
1459 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1460 .dst_reg = EBPF_REG_1,
1461 .src_reg = EBPF_REG_3,
1462 .off = offsetof(struct dummy_offset, u64),
1465 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1466 .dst_reg = EBPF_REG_4,
1470 .code = (BPF_STX | EBPF_XADD | BPF_W),
1471 .dst_reg = EBPF_REG_1,
1472 .src_reg = EBPF_REG_4,
1473 .off = offsetof(struct dummy_offset, u32),
1476 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1477 .dst_reg = EBPF_REG_1,
1478 .src_reg = EBPF_REG_4,
1479 .off = offsetof(struct dummy_offset, u64),
1482 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1483 .dst_reg = EBPF_REG_5,
1487 .code = (BPF_STX | EBPF_XADD | BPF_W),
1488 .dst_reg = EBPF_REG_1,
1489 .src_reg = EBPF_REG_5,
1490 .off = offsetof(struct dummy_offset, u32),
1493 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1494 .dst_reg = EBPF_REG_1,
1495 .src_reg = EBPF_REG_5,
1496 .off = offsetof(struct dummy_offset, u64),
1499 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1500 .dst_reg = EBPF_REG_6,
1504 .code = (BPF_STX | EBPF_XADD | BPF_W),
1505 .dst_reg = EBPF_REG_1,
1506 .src_reg = EBPF_REG_6,
1507 .off = offsetof(struct dummy_offset, u32),
1510 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1511 .dst_reg = EBPF_REG_1,
1512 .src_reg = EBPF_REG_6,
1513 .off = offsetof(struct dummy_offset, u64),
1516 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1517 .dst_reg = EBPF_REG_7,
1521 .code = (BPF_STX | EBPF_XADD | BPF_W),
1522 .dst_reg = EBPF_REG_1,
1523 .src_reg = EBPF_REG_7,
1524 .off = offsetof(struct dummy_offset, u32),
1527 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1528 .dst_reg = EBPF_REG_1,
1529 .src_reg = EBPF_REG_7,
1530 .off = offsetof(struct dummy_offset, u64),
1533 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1534 .dst_reg = EBPF_REG_8,
1538 .code = (BPF_STX | EBPF_XADD | BPF_W),
1539 .dst_reg = EBPF_REG_1,
1540 .src_reg = EBPF_REG_8,
1541 .off = offsetof(struct dummy_offset, u32),
1544 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1545 .dst_reg = EBPF_REG_1,
1546 .src_reg = EBPF_REG_8,
1547 .off = offsetof(struct dummy_offset, u64),
1551 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1552 .dst_reg = EBPF_REG_0,
1556 .code = (BPF_JMP | EBPF_EXIT),
1561 test_xadd1_check(uint64_t rc, const void *arg)
1564 const struct dummy_offset *dft;
1565 struct dummy_offset dfe;
1568 memset(&dfe, 0, sizeof(dfe));
1571 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1572 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1575 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1576 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1578 rv = (int32_t)TEST_FILL_1;
1579 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1580 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1583 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1584 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1587 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1588 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1591 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1592 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1595 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1596 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1598 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
1601 /* alu div test-cases */
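
/*
 * Exercises BPF_DIV/BPF_MOD in 32 and 64 bit; the BPF_OR instructions in
 * the middle make the register divisors non-zero before they are used,
 * while in[3].u32 stays zero so that the final division checks that a
 * divide by zero terminates the program with a return value of 0.
 */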
1602 static const struct ebpf_insn test_div1_prog[] = {
1605 .code = (BPF_LDX | BPF_MEM | BPF_W),
1606 .dst_reg = EBPF_REG_2,
1607 .src_reg = EBPF_REG_1,
1608 .off = offsetof(struct dummy_vect8, in[0].u32),
1611 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1612 .dst_reg = EBPF_REG_3,
1613 .src_reg = EBPF_REG_1,
1614 .off = offsetof(struct dummy_vect8, in[1].u64),
1617 .code = (BPF_LDX | BPF_MEM | BPF_W),
1618 .dst_reg = EBPF_REG_4,
1619 .src_reg = EBPF_REG_1,
1620 .off = offsetof(struct dummy_vect8, in[2].u32),
1623 .code = (BPF_ALU | BPF_DIV | BPF_K),
1624 .dst_reg = EBPF_REG_2,
1628 .code = (EBPF_ALU64 | BPF_MOD | BPF_K),
1629 .dst_reg = EBPF_REG_3,
1633 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1634 .dst_reg = EBPF_REG_2,
1638 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1639 .dst_reg = EBPF_REG_3,
1643 .code = (BPF_ALU | BPF_MOD | BPF_X),
1644 .dst_reg = EBPF_REG_4,
1645 .src_reg = EBPF_REG_2,
1648 .code = (EBPF_ALU64 | BPF_DIV | BPF_X),
1649 .dst_reg = EBPF_REG_4,
1650 .src_reg = EBPF_REG_3,
1653 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1654 .dst_reg = EBPF_REG_1,
1655 .src_reg = EBPF_REG_2,
1656 .off = offsetof(struct dummy_vect8, out[0].u64),
1659 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1660 .dst_reg = EBPF_REG_1,
1661 .src_reg = EBPF_REG_3,
1662 .off = offsetof(struct dummy_vect8, out[1].u64),
1665 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1666 .dst_reg = EBPF_REG_1,
1667 .src_reg = EBPF_REG_4,
1668 .off = offsetof(struct dummy_vect8, out[2].u64),
1670 /* check that we can handle division by zero gracefully. */
1672 .code = (BPF_LDX | BPF_MEM | BPF_W),
1673 .dst_reg = EBPF_REG_2,
1674 .src_reg = EBPF_REG_1,
1675 .off = offsetof(struct dummy_vect8, in[3].u32),
1678 .code = (BPF_ALU | BPF_DIV | BPF_X),
1679 .dst_reg = EBPF_REG_4,
1680 .src_reg = EBPF_REG_2,
1684 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1685 .dst_reg = EBPF_REG_0,
1689 .code = (BPF_JMP | EBPF_EXIT),
1694 test_div1_check(uint64_t rc, const void *arg)
1696 uint64_t r2, r3, r4;
1697 const struct dummy_vect8 *dvt;
1698 struct dummy_vect8 dve;
1701 memset(&dve, 0, sizeof(dve));
1703 r2 = dvt->in[0].u32;
1704 r3 = dvt->in[1].u64;
1705 r4 = dvt->in[2].u32;
1707 r2 = (uint32_t)r2 / TEST_MUL_1;
1711 r4 = (uint32_t)(r4 % r2);
1714 dve.out[0].u64 = r2;
1715 dve.out[1].u64 = r3;
1716 dve.out[2].u64 = r4;
1719 * in the test prog we attempted to divide by zero,
1720 * so the return value should be 0.
1722 return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
1725 /* call test-cases */
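
/*
 * eBPF calling convention: arguments are passed in R1-R5, the result comes
 * back in R0 and R10 is a read-only frame pointer, so the programs below
 * address their stack through negative offsets from R10.
 */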
1726 static const struct ebpf_insn test_call1_prog[] = {
1729 .code = (BPF_LDX | BPF_MEM | BPF_W),
1730 .dst_reg = EBPF_REG_2,
1731 .src_reg = EBPF_REG_1,
1732 .off = offsetof(struct dummy_offset, u32),
1735 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1736 .dst_reg = EBPF_REG_3,
1737 .src_reg = EBPF_REG_1,
1738 .off = offsetof(struct dummy_offset, u64),
1741 .code = (BPF_STX | BPF_MEM | BPF_W),
1742 .dst_reg = EBPF_REG_10,
1743 .src_reg = EBPF_REG_2,
1747 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1748 .dst_reg = EBPF_REG_10,
1749 .src_reg = EBPF_REG_3,
1753 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1754 .dst_reg = EBPF_REG_2,
1755 .src_reg = EBPF_REG_10,
1758 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1759 .dst_reg = EBPF_REG_2,
1763 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1764 .dst_reg = EBPF_REG_3,
1765 .src_reg = EBPF_REG_10,
1768 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1769 .dst_reg = EBPF_REG_3,
1773 .code = (BPF_JMP | EBPF_CALL),
1777 .code = (BPF_LDX | BPF_MEM | BPF_W),
1778 .dst_reg = EBPF_REG_2,
1779 .src_reg = EBPF_REG_10,
1783 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1784 .dst_reg = EBPF_REG_0,
1785 .src_reg = EBPF_REG_10,
1789 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1790 .dst_reg = EBPF_REG_0,
1791 .src_reg = EBPF_REG_2,
1794 .code = (BPF_JMP | EBPF_EXIT),
1799 dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
1801 const struct dummy_offset *dv;
1810 test_call1_check(uint64_t rc, const void *arg)
1814 const struct dummy_offset *dv;
1820 dummy_func1(arg, &v32, &v64);
1823 return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
1826 static const struct rte_bpf_xsym test_call1_xsym[] = {
1828 .name = RTE_STR(dummy_func1),
1829 .type = RTE_BPF_XTYPE_FUNC,
1831 .val = (void *)dummy_func1,
1835 .type = RTE_BPF_ARG_PTR,
1836 .size = sizeof(struct dummy_offset),
1839 .type = RTE_BPF_ARG_PTR,
1840 .size = sizeof(uint32_t),
1843 .type = RTE_BPF_ARG_PTR,
1844 .size = sizeof(uint64_t),
1851 static const struct ebpf_insn test_call2_prog[] = {
1854 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1855 .dst_reg = EBPF_REG_1,
1856 .src_reg = EBPF_REG_10,
1859 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1860 .dst_reg = EBPF_REG_1,
1861 .imm = -(int32_t)sizeof(struct dummy_offset),
1864 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1865 .dst_reg = EBPF_REG_2,
1866 .src_reg = EBPF_REG_10,
1869 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1870 .dst_reg = EBPF_REG_2,
1871 .imm = -2 * (int32_t)sizeof(struct dummy_offset),
1874 .code = (BPF_JMP | EBPF_CALL),
1878 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1879 .dst_reg = EBPF_REG_1,
1880 .src_reg = EBPF_REG_10,
1881 .off = -(int32_t)(sizeof(struct dummy_offset) -
1882 offsetof(struct dummy_offset, u64)),
1885 .code = (BPF_LDX | BPF_MEM | BPF_W),
1886 .dst_reg = EBPF_REG_0,
1887 .src_reg = EBPF_REG_10,
1888 .off = -(int32_t)(sizeof(struct dummy_offset) -
1889 offsetof(struct dummy_offset, u32)),
1892 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1893 .dst_reg = EBPF_REG_0,
1894 .src_reg = EBPF_REG_1,
1897 .code = (BPF_LDX | BPF_MEM | BPF_H),
1898 .dst_reg = EBPF_REG_1,
1899 .src_reg = EBPF_REG_10,
1900 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1901 offsetof(struct dummy_offset, u16)),
1904 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1905 .dst_reg = EBPF_REG_0,
1906 .src_reg = EBPF_REG_1,
1909 .code = (BPF_LDX | BPF_MEM | BPF_B),
1910 .dst_reg = EBPF_REG_1,
1911 .src_reg = EBPF_REG_10,
1912 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1913 offsetof(struct dummy_offset, u8)),
1916 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1917 .dst_reg = EBPF_REG_0,
1918 .src_reg = EBPF_REG_1,
1921 .code = (BPF_JMP | EBPF_EXIT),
1927 dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
1943 test_call2_check(uint64_t rc, const void *arg)
1946 struct dummy_offset a, b;
1950 dummy_func2(&a, &b);
1951 v = a.u64 + a.u32 + b.u16 + b.u8;
1953 return cmp_res(__func__, v, rc, arg, arg, 0);
1956 static const struct rte_bpf_xsym test_call2_xsym[] = {
1958 .name = RTE_STR(dummy_func2),
1959 .type = RTE_BPF_XTYPE_FUNC,
1961 .val = (void *)dummy_func2,
1965 .type = RTE_BPF_ARG_PTR,
1966 .size = sizeof(struct dummy_offset),
1969 .type = RTE_BPF_ARG_PTR,
1970 .size = sizeof(struct dummy_offset),
1977 static const struct ebpf_insn test_call3_prog[] = {
1980 .code = (BPF_JMP | EBPF_CALL),
1984 .code = (BPF_LDX | BPF_MEM | BPF_B),
1985 .dst_reg = EBPF_REG_2,
1986 .src_reg = EBPF_REG_0,
1987 .off = offsetof(struct dummy_offset, u8),
1990 .code = (BPF_LDX | BPF_MEM | BPF_H),
1991 .dst_reg = EBPF_REG_3,
1992 .src_reg = EBPF_REG_0,
1993 .off = offsetof(struct dummy_offset, u16),
1996 .code = (BPF_LDX | BPF_MEM | BPF_W),
1997 .dst_reg = EBPF_REG_4,
1998 .src_reg = EBPF_REG_0,
1999 .off = offsetof(struct dummy_offset, u32),
2002 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
2003 .dst_reg = EBPF_REG_0,
2004 .src_reg = EBPF_REG_0,
2005 .off = offsetof(struct dummy_offset, u64),
2009 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2010 .dst_reg = EBPF_REG_0,
2011 .src_reg = EBPF_REG_4,
2014 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2015 .dst_reg = EBPF_REG_0,
2016 .src_reg = EBPF_REG_3,
2019 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2020 .dst_reg = EBPF_REG_0,
2021 .src_reg = EBPF_REG_2,
2024 .code = (BPF_JMP | EBPF_EXIT),
2028 static const struct dummy_offset *
2029 dummy_func3(const struct dummy_vect8 *p)
2031 return &p->in[RTE_DIM(p->in) - 1];
2035 test_call3_prepare(void *arg)
2037 struct dummy_vect8 *pv;
2038 struct dummy_offset *df;
2041 df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv);
2043 memset(pv, 0, sizeof(*pv));
2044 df->u64 = (int32_t)TEST_FILL_1;
2051 test_call3_check(uint64_t rc, const void *arg)
2054 const struct dummy_vect8 *pv;
2055 const struct dummy_offset *dft;
2058 dft = dummy_func3(pv);
2065 return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv));
2068 static const struct rte_bpf_xsym test_call3_xsym[] = {
2070 .name = RTE_STR(dummy_func3),
2071 .type = RTE_BPF_XTYPE_FUNC,
2073 .val = (void *)dummy_func3,
2077 .type = RTE_BPF_ARG_PTR,
2078 .size = sizeof(struct dummy_vect8),
2082 .type = RTE_BPF_ARG_PTR,
2083 .size = sizeof(struct dummy_offset),
2089 /* Test for stack corruption in multiple function calls */
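
/*
 * Four bytes are stored on the stack, frobbed in place by dummy_func4_0(),
 * reloaded and packed into a word by dummy_func4_1(), then XORed with
 * TEST_MEMFROB; any stack corruption across the two calls would change the
 * return value.
 */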
2090 static const struct ebpf_insn test_call4_prog[] = {
2092 .code = (BPF_ST | BPF_MEM | BPF_B),
2093 .dst_reg = EBPF_REG_10,
2098 .code = (BPF_ST | BPF_MEM | BPF_B),
2099 .dst_reg = EBPF_REG_10,
2104 .code = (BPF_ST | BPF_MEM | BPF_B),
2105 .dst_reg = EBPF_REG_10,
2110 .code = (BPF_ST | BPF_MEM | BPF_B),
2111 .dst_reg = EBPF_REG_10,
2116 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2117 .dst_reg = EBPF_REG_1,
2118 .src_reg = EBPF_REG_10,
2121 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2122 .dst_reg = EBPF_REG_2,
2126 .code = (EBPF_ALU64 | BPF_SUB | BPF_X),
2127 .dst_reg = EBPF_REG_1,
2128 .src_reg = EBPF_REG_2,
2131 .code = (BPF_JMP | EBPF_CALL),
2135 .code = (BPF_LDX | BPF_MEM | BPF_B),
2136 .dst_reg = EBPF_REG_1,
2137 .src_reg = EBPF_REG_10,
2141 .code = (BPF_LDX | BPF_MEM | BPF_B),
2142 .dst_reg = EBPF_REG_2,
2143 .src_reg = EBPF_REG_10,
2147 .code = (BPF_LDX | BPF_MEM | BPF_B),
2148 .dst_reg = EBPF_REG_3,
2149 .src_reg = EBPF_REG_10,
2153 .code = (BPF_LDX | BPF_MEM | BPF_B),
2154 .dst_reg = EBPF_REG_4,
2155 .src_reg = EBPF_REG_10,
2159 .code = (BPF_JMP | EBPF_CALL),
2163 .code = (EBPF_ALU64 | BPF_XOR | BPF_K),
2164 .dst_reg = EBPF_REG_0,
2165 .imm = TEST_MEMFROB,
2168 .code = (BPF_JMP | EBPF_EXIT),
2172 /* Gather the four bytes back into a 32-bit word */
2174 dummy_func4_1(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
2176 return (a << 24) | (b << 16) | (c << 8) | (d << 0);
2179 /* Implementation of memfrob */
2181 dummy_func4_0(uint32_t *s, uint8_t n)
2183 char *p = (char *) s;
2191 test_call4_check(uint64_t rc, const void *arg)
2193 uint8_t a[4] = {1, 2, 3, 4};
2198 s = dummy_func4_0((uint32_t *)a, 4);
2200 s = dummy_func4_1(a[0], a[1], a[2], a[3]);
2202 v = s ^ TEST_MEMFROB;
2204 return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
2207 static const struct rte_bpf_xsym test_call4_xsym[] = {
2209 .name = RTE_STR(dummy_func4_0),
2210 .type = RTE_BPF_XTYPE_FUNC,
2212 .val = (void *)dummy_func4_0,
2216 .type = RTE_BPF_ARG_PTR,
2217 .size = 4 * sizeof(uint8_t),
2220 .type = RTE_BPF_ARG_RAW,
2221 .size = sizeof(uint8_t),
2225 .type = RTE_BPF_ARG_RAW,
2226 .size = sizeof(uint32_t),
2231 .name = RTE_STR(dummy_func4_1),
2232 .type = RTE_BPF_XTYPE_FUNC,
2234 .val = (void *)dummy_func4_1,
2238 .type = RTE_BPF_ARG_RAW,
2239 .size = sizeof(uint8_t),
2242 .type = RTE_BPF_ARG_RAW,
2243 .size = sizeof(uint8_t),
2246 .type = RTE_BPF_ARG_RAW,
2247 .size = sizeof(uint8_t),
2250 .type = RTE_BPF_ARG_RAW,
2251 .size = sizeof(uint8_t),
2255 .type = RTE_BPF_ARG_RAW,
2256 .size = sizeof(uint32_t),
2262 /* string compare test case */
2263 static const struct ebpf_insn test_call5_prog[] = {
2266 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2267 .dst_reg = EBPF_REG_1,
2271 .code = (BPF_STX | BPF_MEM | BPF_W),
2272 .dst_reg = EBPF_REG_10,
2273 .src_reg = EBPF_REG_1,
2277 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2278 .dst_reg = EBPF_REG_6,
2282 .code = (BPF_STX | BPF_MEM | BPF_B),
2283 .dst_reg = EBPF_REG_10,
2284 .src_reg = EBPF_REG_6,
2288 .code = (BPF_STX | BPF_MEM | BPF_W),
2289 .dst_reg = EBPF_REG_10,
2290 .src_reg = EBPF_REG_6,
2294 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2295 .dst_reg = EBPF_REG_1,
2299 .code = (BPF_STX | BPF_MEM | BPF_W),
2300 .dst_reg = EBPF_REG_10,
2301 .src_reg = EBPF_REG_1,
2305 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2306 .dst_reg = EBPF_REG_1,
2307 .src_reg = EBPF_REG_10,
2310 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2311 .dst_reg = EBPF_REG_1,
2315 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2316 .dst_reg = EBPF_REG_2,
2317 .src_reg = EBPF_REG_1,
2320 .code = (BPF_JMP | EBPF_CALL),
2324 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2325 .dst_reg = EBPF_REG_1,
2326 .src_reg = EBPF_REG_0,
2329 .code = (BPF_ALU | EBPF_MOV | BPF_K),
2330 .dst_reg = EBPF_REG_0,
2334 .code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2335 .dst_reg = EBPF_REG_1,
2339 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2340 .dst_reg = EBPF_REG_1,
2344 .code = (BPF_JMP | EBPF_JNE | BPF_K),
2345 .dst_reg = EBPF_REG_1,
2350 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2351 .dst_reg = EBPF_REG_1,
2352 .src_reg = EBPF_REG_10,
2355 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2356 .dst_reg = EBPF_REG_1,
2360 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2361 .dst_reg = EBPF_REG_2,
2362 .src_reg = EBPF_REG_10,
2365 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2366 .dst_reg = EBPF_REG_2,
2370 .code = (BPF_JMP | EBPF_CALL),
2374 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2375 .dst_reg = EBPF_REG_1,
2376 .src_reg = EBPF_REG_0,
2379 .code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2380 .dst_reg = EBPF_REG_1,
2384 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2385 .dst_reg = EBPF_REG_1,
2389 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2390 .dst_reg = EBPF_REG_0,
2391 .src_reg = EBPF_REG_1,
2394 .code = (BPF_JMP | BPF_JEQ | BPF_X),
2395 .dst_reg = EBPF_REG_1,
2396 .src_reg = EBPF_REG_6,
2400 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2401 .dst_reg = EBPF_REG_0,
2405 .code = (BPF_JMP | EBPF_EXIT),
2409 /* String comparison implementation: return 0 if equal, else the difference */
2411 dummy_func5(const char *s1, const char *s2)
2413 while (*s1 && (*s1 == *s2)) {
2417 return *(const unsigned char *)s1 - *(const unsigned char *)s2;
2421 test_call5_check(uint64_t rc, const void *arg)
2429 v = dummy_func5(a, a);
2435 v = dummy_func5(a, b);
2442 return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
2445 static const struct rte_bpf_xsym test_call5_xsym[] = {
2447 .name = RTE_STR(dummy_func5),
2448 .type = RTE_BPF_XTYPE_FUNC,
2450 .val = (void *)dummy_func5,
2454 .type = RTE_BPF_ARG_PTR,
2455 .size = sizeof(char),
2458 .type = RTE_BPF_ARG_PTR,
2459 .size = sizeof(char),
2463 .type = RTE_BPF_ARG_RAW,
2464 .size = sizeof(uint32_t),
2470 /* load mbuf (BPF_ABS/BPF_IND) test-cases */
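
/*
 * Classic BPF_LD | BPF_ABS/BPF_IND loads read packet bytes relative to the
 * mbuf data start (plus src_reg for BPF_IND) with the mbuf pointer taken
 * implicitly from R6; the loaded value is converted to host byte order and
 * an out-of-bounds read terminates the program with a return value of 0
 * (see test_ld_mbuf2 below).
 */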
2471 static const struct ebpf_insn test_ld_mbuf1_prog[] = {
2473 /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
2475 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2476 .dst_reg = EBPF_REG_6,
2477 .src_reg = EBPF_REG_1,
2479 /* load IPv4 version and IHL */
2481 .code = (BPF_LD | BPF_ABS | BPF_B),
2482 .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
2484 /* check IP version */
2486 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2487 .dst_reg = EBPF_REG_2,
2488 .src_reg = EBPF_REG_0,
2491 .code = (BPF_ALU | BPF_AND | BPF_K),
2492 .dst_reg = EBPF_REG_2,
2496 .code = (BPF_JMP | BPF_JEQ | BPF_K),
2497 .dst_reg = EBPF_REG_2,
2498 .imm = IPVERSION << 4,
2501 /* invalid IP version, return 0 */
2503 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
2504 .dst_reg = EBPF_REG_0,
2505 .src_reg = EBPF_REG_0,
2508 .code = (BPF_JMP | EBPF_EXIT),
2510 /* load the 3rd byte of IP data */
2512 .code = (BPF_ALU | BPF_AND | BPF_K),
2513 .dst_reg = EBPF_REG_0,
2514 .imm = RTE_IPV4_HDR_IHL_MASK,
2517 .code = (BPF_ALU | BPF_LSH | BPF_K),
2518 .dst_reg = EBPF_REG_0,
2522 .code = (BPF_LD | BPF_IND | BPF_B),
2523 .src_reg = EBPF_REG_0,
2527 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2528 .dst_reg = EBPF_REG_7,
2529 .src_reg = EBPF_REG_0,
2531 /* load IPv4 src addr */
2533 .code = (BPF_LD | BPF_ABS | BPF_W),
2534 .imm = offsetof(struct rte_ipv4_hdr, src_addr),
2537 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2538 .dst_reg = EBPF_REG_7,
2539 .src_reg = EBPF_REG_0,
2541 /* load IPv4 total length */
2543 .code = (BPF_LD | BPF_ABS | BPF_H),
2544 .imm = offsetof(struct rte_ipv4_hdr, total_length),
2547 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2548 .dst_reg = EBPF_REG_8,
2549 .src_reg = EBPF_REG_0,
2551 /* load last 4 bytes of IP data */
2553 .code = (BPF_LD | BPF_IND | BPF_W),
2554 .src_reg = EBPF_REG_8,
2555 .imm = -(int32_t)sizeof(uint32_t),
2558 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2559 .dst_reg = EBPF_REG_7,
2560 .src_reg = EBPF_REG_0,
2562 /* load 2 bytes from the middle of IP data */
2564 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2565 .dst_reg = EBPF_REG_8,
2569 .code = (BPF_LD | BPF_IND | BPF_H),
2570 .src_reg = EBPF_REG_8,
2573 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2574 .dst_reg = EBPF_REG_0,
2575 .src_reg = EBPF_REG_7,
2578 .code = (BPF_JMP | EBPF_EXIT),
2583 dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len,
2590 mb->buf_iova = (uintptr_t)buf;
2591 mb->buf_len = buf_len;
2592 rte_mbuf_refcnt_set(mb, 1);
2594 /* set pool pointer to dummy value, test doesn't use it */
2595 mb->pool = (void *)buf;
2597 rte_pktmbuf_reset(mb);
2598 db = (uint8_t *)rte_pktmbuf_append(mb, data_len);
2600 for (i = 0; i != data_len; i++)
2605 test_ld_mbuf1_prepare(void *arg)
2607 struct dummy_mbuf *dm;
2608 struct rte_ipv4_hdr *ph;
2610 const uint32_t plen = 400;
2611 const struct rte_ipv4_hdr iph = {
2612 .version_ihl = RTE_IPV4_VHL_DEF,
2613 .total_length = rte_cpu_to_be_16(plen),
2614 .time_to_live = IPDEFTTL,
2615 .next_proto_id = IPPROTO_RAW,
2616 .src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
2617 .dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
2621 memset(dm, 0, sizeof(*dm));
2623 dummy_mbuf_prep(&dm->mb[0], dm->buf[0], sizeof(dm->buf[0]),
2625 dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[0]),
2628 rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]);
2630 ph = rte_pktmbuf_mtod(dm->mb, typeof(ph));
2631 memcpy(ph, &iph, sizeof(iph));
2635 test_ld_mbuf1(const struct rte_mbuf *pkt)
2639 const uint16_t *p16;
2640 const uint32_t *p32;
2641 struct dummy_offset dof;
2643 /* load IPv4 version and IHL */
2644 p8 = rte_pktmbuf_read(pkt,
2645 offsetof(struct rte_ipv4_hdr, version_ihl), sizeof(*p8),
2650 /* check IP version */
2651 if ((p8[0] & 0xf0) != IPVERSION << 4)
2654 n = (p8[0] & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;
2656 /* load the 3rd byte of IP data */
2657 p8 = rte_pktmbuf_read(pkt, n + 3, sizeof(*p8), &dof);
2663 /* load IPv4 src addr */
2664 p32 = rte_pktmbuf_read(pkt,
2665 offsetof(struct rte_ipv4_hdr, src_addr), sizeof(*p32),
2670 v += rte_be_to_cpu_32(p32[0]);
2672 /* load IPv4 total length */
2673 p16 = rte_pktmbuf_read(pkt,
2674 offsetof(struct rte_ipv4_hdr, total_length), sizeof(*p16),
2679 n = rte_be_to_cpu_16(p16[0]);
2681 /* load last 4 bytes of IP data */
2682 p32 = rte_pktmbuf_read(pkt, n - sizeof(*p32), sizeof(*p32), &dof);
2686 v += rte_be_to_cpu_32(p32[0]);
2688 /* load 2 bytes from the middle of IP data */
2689 p16 = rte_pktmbuf_read(pkt, n / 2, sizeof(*p16), &dof);
2693 v += rte_be_to_cpu_16(p16[0]);
2698 test_ld_mbuf1_check(uint64_t rc, const void *arg)
2700 const struct dummy_mbuf *dm;
2704 v = test_ld_mbuf1(dm->mb);
2705 return cmp_res(__func__, v, rc, arg, arg, 0);
2709 * same as ld_mbuf1, but then truncate the mbuf by 1B,
2710 * so the load of the last 4B fails.
2713 test_ld_mbuf2_prepare(void *arg)
2715 struct dummy_mbuf *dm;
2717 test_ld_mbuf1_prepare(arg);
2719 rte_pktmbuf_trim(dm->mb, 1);
2723 test_ld_mbuf2_check(uint64_t rc, const void *arg)
2725 return cmp_res(__func__, 0, rc, arg, arg, 0);
2728 /* same as test_ld_mbuf1, but now store intermediate results on the stack */
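
/*
 * Intermediate results are spilled to a struct dummy_offset laid out at
 * the top of the stack frame, hence the negative offsets from R10 below.
 */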
2729 static const struct ebpf_insn test_ld_mbuf3_prog[] = {
2731 /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
2733 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2734 .dst_reg = EBPF_REG_6,
2735 .src_reg = EBPF_REG_1,
2737 /* load IPv4 version and IHL */
2739 .code = (BPF_LD | BPF_ABS | BPF_B),
2740 .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
2742 /* check IP version */
2744 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2745 .dst_reg = EBPF_REG_2,
2746 .src_reg = EBPF_REG_0,
2749 .code = (BPF_ALU | BPF_AND | BPF_K),
2750 .dst_reg = EBPF_REG_2,
2754 .code = (BPF_JMP | BPF_JEQ | BPF_K),
2755 .dst_reg = EBPF_REG_2,
2756 .imm = IPVERSION << 4,
2759 /* invalid IP version, return 0 */
2761 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
2762 .dst_reg = EBPF_REG_0,
2763 .src_reg = EBPF_REG_0,
2766 .code = (BPF_JMP | EBPF_EXIT),
2768 /* load the 3rd byte of IP data */
2770 .code = (BPF_ALU | BPF_AND | BPF_K),
2771 .dst_reg = EBPF_REG_0,
2772 .imm = RTE_IPV4_HDR_IHL_MASK,
2775 .code = (BPF_ALU | BPF_LSH | BPF_K),
2776 .dst_reg = EBPF_REG_0,
2780 .code = (BPF_LD | BPF_IND | BPF_B),
2781 .src_reg = EBPF_REG_0,
2785 .code = (BPF_STX | BPF_MEM | BPF_B),
2786 .dst_reg = EBPF_REG_10,
2787 .src_reg = EBPF_REG_0,
2788 .off = (int16_t)(offsetof(struct dummy_offset, u8) -
2789 sizeof(struct dummy_offset)),
2791 /* load IPv4 src addr */
2793 .code = (BPF_LD | BPF_ABS | BPF_W),
2794 .imm = offsetof(struct rte_ipv4_hdr, src_addr),
2797 .code = (BPF_STX | BPF_MEM | BPF_W),
2798 .dst_reg = EBPF_REG_10,
2799 .src_reg = EBPF_REG_0,
2800 .off = (int16_t)(offsetof(struct dummy_offset, u32) -
2801 sizeof(struct dummy_offset)),
2803 /* load IPv4 total length */
2805 .code = (BPF_LD | BPF_ABS | BPF_H),
2806 .imm = offsetof(struct rte_ipv4_hdr, total_length),
2809 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2810 .dst_reg = EBPF_REG_8,
2811 .src_reg = EBPF_REG_0,
2813 /* load last 4 bytes of IP data */
2815 .code = (BPF_LD | BPF_IND | BPF_W),
2816 .src_reg = EBPF_REG_8,
2817 .imm = -(int32_t)sizeof(uint32_t),
2820 .code = (BPF_STX | BPF_MEM | EBPF_DW),
2821 .dst_reg = EBPF_REG_10,
2822 .src_reg = EBPF_REG_0,
2823 .off = (int16_t)(offsetof(struct dummy_offset, u64) -
2824 sizeof(struct dummy_offset)),
2826 /* load 2 bytes from the middle of IP data */
2828 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2829 .dst_reg = EBPF_REG_8,
2833 .code = (BPF_LD | BPF_IND | BPF_H),
2834 .src_reg = EBPF_REG_8,
2837 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
2838 .dst_reg = EBPF_REG_1,
2839 .src_reg = EBPF_REG_10,
2840 .off = (int16_t)(offsetof(struct dummy_offset, u64) -
2841 sizeof(struct dummy_offset)),
2844 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2845 .dst_reg = EBPF_REG_0,
2846 .src_reg = EBPF_REG_1,
2849 .code = (BPF_LDX | BPF_MEM | BPF_W),
2850 .dst_reg = EBPF_REG_1,
2851 .src_reg = EBPF_REG_10,
2852 .off = (int16_t)(offsetof(struct dummy_offset, u32) -
2853 sizeof(struct dummy_offset)),
2856 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2857 .dst_reg = EBPF_REG_0,
2858 .src_reg = EBPF_REG_1,
2861 .code = (BPF_LDX | BPF_MEM | BPF_B),
2862 .dst_reg = EBPF_REG_1,
2863 .src_reg = EBPF_REG_10,
2864 .off = (int16_t)(offsetof(struct dummy_offset, u8) -
2865 sizeof(struct dummy_offset)),
2868 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2869 .dst_reg = EBPF_REG_0,
2870 .src_reg = EBPF_REG_1,
2873 .code = (BPF_JMP | EBPF_EXIT),
2877 /* all bpf test cases */
2878 static const struct bpf_test tests[] = {
2880 .name = "test_store1",
2881 .arg_sz = sizeof(struct dummy_offset),
2883 .ins = test_store1_prog,
2884 .nb_ins = RTE_DIM(test_store1_prog),
2886 .type = RTE_BPF_ARG_PTR,
2887 .size = sizeof(struct dummy_offset),
2890 .prepare = test_store1_prepare,
2891 .check_result = test_store1_check,
2894 .name = "test_store2",
2895 .arg_sz = sizeof(struct dummy_offset),
2897 .ins = test_store2_prog,
2898 .nb_ins = RTE_DIM(test_store2_prog),
2900 .type = RTE_BPF_ARG_PTR,
2901 .size = sizeof(struct dummy_offset),
2904 .prepare = test_store1_prepare,
2905 .check_result = test_store1_check,
2908 .name = "test_load1",
2909 .arg_sz = sizeof(struct dummy_offset),
2911 .ins = test_load1_prog,
2912 .nb_ins = RTE_DIM(test_load1_prog),
2914 .type = RTE_BPF_ARG_PTR,
2915 .size = sizeof(struct dummy_offset),
2918 .prepare = test_load1_prepare,
2919 .check_result = test_load1_check,
2922 .name = "test_ldimm1",
2923 .arg_sz = sizeof(struct dummy_offset),
2925 .ins = test_ldimm1_prog,
2926 .nb_ins = RTE_DIM(test_ldimm1_prog),
2928 .type = RTE_BPF_ARG_PTR,
2929 .size = sizeof(struct dummy_offset),
2932 .prepare = test_store1_prepare,
2933 .check_result = test_ldimm1_check,
2936 .name = "test_mul1",
2937 .arg_sz = sizeof(struct dummy_vect8),
2939 .ins = test_mul1_prog,
2940 .nb_ins = RTE_DIM(test_mul1_prog),
2942 .type = RTE_BPF_ARG_PTR,
2943 .size = sizeof(struct dummy_vect8),
2946 .prepare = test_mul1_prepare,
2947 .check_result = test_mul1_check,
2950 .name = "test_shift1",
2951 .arg_sz = sizeof(struct dummy_vect8),
2953 .ins = test_shift1_prog,
2954 .nb_ins = RTE_DIM(test_shift1_prog),
2956 .type = RTE_BPF_ARG_PTR,
2957 .size = sizeof(struct dummy_vect8),
2960 .prepare = test_shift1_prepare,
2961 .check_result = test_shift1_check,
2964 .name = "test_jump1",
2965 .arg_sz = sizeof(struct dummy_vect8),
2967 .ins = test_jump1_prog,
2968 .nb_ins = RTE_DIM(test_jump1_prog),
2970 .type = RTE_BPF_ARG_PTR,
2971 .size = sizeof(struct dummy_vect8),
2974 .prepare = test_jump1_prepare,
2975 .check_result = test_jump1_check,
2978 .name = "test_jump2",
2979 .arg_sz = sizeof(struct dummy_net),
2981 .ins = test_jump2_prog,
2982 .nb_ins = RTE_DIM(test_jump2_prog),
2984 .type = RTE_BPF_ARG_PTR,
2985 .size = sizeof(struct dummy_net),
2988 .prepare = test_jump2_prepare,
2989 .check_result = test_jump2_check,
2992 .name = "test_alu1",
2993 .arg_sz = sizeof(struct dummy_vect8),
2995 .ins = test_alu1_prog,
2996 .nb_ins = RTE_DIM(test_alu1_prog),
2998 .type = RTE_BPF_ARG_PTR,
2999 .size = sizeof(struct dummy_vect8),
3002 .prepare = test_jump1_prepare,
3003 .check_result = test_alu1_check,
3006 .name = "test_bele1",
3007 .arg_sz = sizeof(struct dummy_vect8),
3009 .ins = test_bele1_prog,
3010 .nb_ins = RTE_DIM(test_bele1_prog),
3012 .type = RTE_BPF_ARG_PTR,
3013 .size = sizeof(struct dummy_vect8),
3016 .prepare = test_bele1_prepare,
3017 .check_result = test_bele1_check,
3020 .name = "test_xadd1",
3021 .arg_sz = sizeof(struct dummy_offset),
3023 .ins = test_xadd1_prog,
3024 .nb_ins = RTE_DIM(test_xadd1_prog),
3026 .type = RTE_BPF_ARG_PTR,
3027 .size = sizeof(struct dummy_offset),
3030 .prepare = test_store1_prepare,
3031 .check_result = test_xadd1_check,
3034 .name = "test_div1",
3035 .arg_sz = sizeof(struct dummy_vect8),
3037 .ins = test_div1_prog,
3038 .nb_ins = RTE_DIM(test_div1_prog),
3040 .type = RTE_BPF_ARG_PTR,
3041 .size = sizeof(struct dummy_vect8),
3044 .prepare = test_mul1_prepare,
3045 .check_result = test_div1_check,
3048 .name = "test_call1",
3049 .arg_sz = sizeof(struct dummy_offset),
3051 .ins = test_call1_prog,
3052 .nb_ins = RTE_DIM(test_call1_prog),
3054 .type = RTE_BPF_ARG_PTR,
3055 .size = sizeof(struct dummy_offset),
3057 .xsym = test_call1_xsym,
3058 .nb_xsym = RTE_DIM(test_call1_xsym),
3060 .prepare = test_load1_prepare,
3061 .check_result = test_call1_check,
3062 /* for now, function calls are not supported on 32-bit platforms */
3063 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3066 .name = "test_call2",
3067 .arg_sz = sizeof(struct dummy_offset),
3069 .ins = test_call2_prog,
3070 .nb_ins = RTE_DIM(test_call2_prog),
3072 .type = RTE_BPF_ARG_PTR,
3073 .size = sizeof(struct dummy_offset),
3075 .xsym = test_call2_xsym,
3076 .nb_xsym = RTE_DIM(test_call2_xsym),
3078 .prepare = test_store1_prepare,
3079 .check_result = test_call2_check,
3080 /* for now, function calls are not supported on 32-bit platforms */
3081 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3084 .name = "test_call3",
3085 .arg_sz = sizeof(struct dummy_vect8),
3087 .ins = test_call3_prog,
3088 .nb_ins = RTE_DIM(test_call3_prog),
3090 .type = RTE_BPF_ARG_PTR,
3091 .size = sizeof(struct dummy_vect8),
3093 .xsym = test_call3_xsym,
3094 .nb_xsym = RTE_DIM(test_call3_xsym),
3096 .prepare = test_call3_prepare,
3097 .check_result = test_call3_check,
3098 /* for now, function calls are not supported on 32-bit platforms */
3099 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3102 .name = "test_call4",
3103 .arg_sz = sizeof(struct dummy_offset),
3105 .ins = test_call4_prog,
3106 .nb_ins = RTE_DIM(test_call4_prog),
3108 .type = RTE_BPF_ARG_PTR,
3109 .size = 2 * sizeof(struct dummy_offset),
3111 .xsym = test_call4_xsym,
3112 .nb_xsym = RTE_DIM(test_call4_xsym),
3114 .prepare = test_store1_prepare,
3115 .check_result = test_call4_check,
3116 /* for now, function calls are not supported on 32-bit platforms */
3117 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3120 .name = "test_call5",
3121 .arg_sz = sizeof(struct dummy_offset),
3123 .ins = test_call5_prog,
3124 .nb_ins = RTE_DIM(test_call5_prog),
3126 .type = RTE_BPF_ARG_PTR,
3127 .size = sizeof(struct dummy_offset),
3129 .xsym = test_call5_xsym,
3130 .nb_xsym = RTE_DIM(test_call5_xsym),
3132 .prepare = test_store1_prepare,
3133 .check_result = test_call5_check,
3134 /* for now, function calls are not supported on 32-bit platforms */
3135 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3138 .name = "test_ld_mbuf1",
3139 .arg_sz = sizeof(struct dummy_mbuf),
3141 .ins = test_ld_mbuf1_prog,
3142 .nb_ins = RTE_DIM(test_ld_mbuf1_prog),
3144 .type = RTE_BPF_ARG_PTR_MBUF,
3145 .buf_size = sizeof(struct dummy_mbuf),
3148 .prepare = test_ld_mbuf1_prepare,
3149 .check_result = test_ld_mbuf1_check,
3150 /* mbuf as input argument is not supported on 32-bit platforms */
3151 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3154 .name = "test_ld_mbuf2",
3155 .arg_sz = sizeof(struct dummy_mbuf),
3157 .ins = test_ld_mbuf1_prog,
3158 .nb_ins = RTE_DIM(test_ld_mbuf1_prog),
3160 .type = RTE_BPF_ARG_PTR_MBUF,
3161 .buf_size = sizeof(struct dummy_mbuf),
3164 .prepare = test_ld_mbuf2_prepare,
3165 .check_result = test_ld_mbuf2_check,
3166 /* mbuf as input argument is not supported on 32-bit platforms */
3167 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3170 .name = "test_ld_mbuf3",
3171 .arg_sz = sizeof(struct dummy_mbuf),
3173 .ins = test_ld_mbuf3_prog,
3174 .nb_ins = RTE_DIM(test_ld_mbuf3_prog),
3176 .type = RTE_BPF_ARG_PTR_MBUF,
3177 .buf_size = sizeof(struct dummy_mbuf),
3180 .prepare = test_ld_mbuf1_prepare,
3181 .check_result = test_ld_mbuf1_check,
3182 /* mbuf as input argument is not supported on 32-bit platforms */
3183 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3188 run_test(const struct bpf_test *tst)
3192 struct rte_bpf *bpf;
3193 struct rte_bpf_jit jit;
3194 uint8_t tbuf[tst->arg_sz];
3196 printf("%s(%s) start\n", __func__, tst->name);
3198 bpf = rte_bpf_load(&tst->prm);
3200 printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
3201 __func__, __LINE__, rte_errno, strerror(rte_errno));
3206 rc = rte_bpf_exec(bpf, tbuf);
3207 ret = tst->check_result(rc, tbuf);
3209 printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
3210 __func__, __LINE__, tst->name, ret, strerror(ret));
3213 /* repeat the same test with jit, when possible */
3214 rte_bpf_get_jit(bpf, &jit);
3215 if (jit.func != NULL) {
3218 rc = jit.func(tbuf);
3219 rv = tst->check_result(rc, tbuf);
3222 printf("%s@%d: check_result(%s) failed, "
3224 __func__, __LINE__, tst->name,
3229 rte_bpf_destroy(bpf);
3241 for (i = 0; i != RTE_DIM(tests); i++) {
3242 rv = run_test(tests + i);
3243 if (tests[i].allow_fail == 0)
3250 REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);