1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
10 #include <rte_memory.h>
11 #include <rte_debug.h>
12 #include <rte_hexdump.h>
13 #include <rte_malloc.h>
14 #include <rte_random.h>
15 #include <rte_byteorder.h>
16 #include <rte_errno.h>
19 #if !defined(RTE_LIB_BPF)
24 printf("BPF not supported, skipping test\n");
31 #include <rte_ether.h>
36 * Basic functional tests for librte_bpf.
37 * The main procedure - load eBPF program, execute it and
38 * compare results with expected values.
49 struct dummy_offset in[8];
50 struct dummy_offset out[8];
54 struct rte_ether_hdr eth_hdr;
55 struct rte_vlan_hdr vlan_hdr;
56 struct rte_ipv4_hdr ip_hdr;
59 #define DUMMY_MBUF_NUM 2
61 /* first mbuf in the packet, should always be at offset 0 */
63 struct rte_mbuf mb[DUMMY_MBUF_NUM];
64 uint8_t buf[DUMMY_MBUF_NUM][RTE_MBUF_DEFAULT_BUF_SIZE];
67 #define TEST_FILL_1 0xDEADBEEF
70 #define TEST_MUL_2 -100
72 #define TEST_SHIFT_1 15
73 #define TEST_SHIFT_2 33
75 #define TEST_SHIFT32_MASK (CHAR_BIT * sizeof(uint32_t) - 1)
76 #define TEST_SHIFT64_MASK (CHAR_BIT * sizeof(uint64_t) - 1)
79 #define TEST_JCC_2 -123
80 #define TEST_JCC_3 5678
81 #define TEST_JCC_4 TEST_FILL_1
83 #define TEST_IMM_1 UINT64_MAX
84 #define TEST_IMM_2 ((uint64_t)INT64_MIN)
85 #define TEST_IMM_3 ((uint64_t)INT64_MAX + INT32_MAX)
86 #define TEST_IMM_4 ((uint64_t)UINT32_MAX)
87 #define TEST_IMM_5 ((uint64_t)UINT32_MAX + 1)
89 #define TEST_MEMFROB 0x2a2a2a2a
91 #define STRING_GEEK 0x6B656567
92 #define STRING_WEEK 0x6B656577
94 #define TEST_NETMASK 0xffffff00
95 #define TEST_SUBNET 0xaca80200
97 uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
98 uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
100 uint32_t ip_src_addr = (172U << 24) | (168U << 16) | (2 << 8) | 1;
101 uint32_t ip_dst_addr = (172U << 24) | (168U << 16) | (2 << 8) | 2;
106 struct rte_bpf_prm prm;
107 void (*prepare)(void *);
108 int (*check_result)(uint64_t, const void *);
113 * Compare return value and result data with expected ones.
114 * Report a failure if they don't match.
/*
 * Common result checker used by every test case below.
 * Compares the eBPF program's actual return value (ret_rc) with the
 * expected one (exp_rc), then memcmp()s res_sz bytes of produced data
 * (ret_res) against expected data (exp_res), dumping both on mismatch.
 * NOTE(review): interior lines (function header, braces, return paths)
 * are missing from this extract - verify against the full source.
 */
117 cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
118 const void *exp_res, const void *ret_res, size_t res_sz)
/* report a mismatching scalar return value */
123 if (exp_rc != ret_rc) {
124 printf("%s@%d: invalid return value, expected: 0x%" PRIx64
125 ",result: 0x%" PRIx64 "\n",
126 func, __LINE__, exp_rc, ret_rc);
/* report mismatching result memory contents with hexdumps of both sides */
130 if (memcmp(exp_res, ret_res, res_sz) != 0) {
131 printf("%s: invalid value\n", func);
132 rte_memdump(stdout, "expected", exp_res, res_sz);
133 rte_memdump(stdout, "result", ret_res, res_sz);
140 /* store immediate test-cases */
/*
 * test_store1: store-immediate (BPF_ST) test case.
 * The eBPF program stores an immediate into the dummy_offset struct
 * pointed to by r1 at each width (byte/half/word/dword), then returns 1.
 * NOTE(review): .imm values and struct braces are missing from this
 * extract - verify against the full source.
 */
141 static const struct ebpf_insn test_store1_prog[] = {
/* *(uint8_t *)(r1 + off8) = imm */
143 .code = (BPF_ST | BPF_MEM | BPF_B),
144 .dst_reg = EBPF_REG_1,
145 .off = offsetof(struct dummy_offset, u8),
/* *(uint16_t *)(r1 + off16) = imm */
149 .code = (BPF_ST | BPF_MEM | BPF_H),
150 .dst_reg = EBPF_REG_1,
151 .off = offsetof(struct dummy_offset, u16),
/* *(uint32_t *)(r1 + off32) = imm */
155 .code = (BPF_ST | BPF_MEM | BPF_W),
156 .dst_reg = EBPF_REG_1,
157 .off = offsetof(struct dummy_offset, u32),
/* *(uint64_t *)(r1 + off64) = imm */
161 .code = (BPF_ST | BPF_MEM | EBPF_DW),
162 .dst_reg = EBPF_REG_1,
163 .off = offsetof(struct dummy_offset, u64),
/* set return value (r0) and exit */
168 .code = (BPF_ALU | EBPF_MOV | BPF_K),
169 .dst_reg = EBPF_REG_0,
173 .code = (BPF_JMP | EBPF_EXIT),
/* zero the input struct before the program runs */
178 test_store1_prepare(void *arg)
180 struct dummy_offset *df;
183 memset(df, 0, sizeof(*df));
/*
 * Build the expected struct contents on the host side and compare with
 * what the eBPF program stored; expected return code is 1.
 */
187 test_store1_check(uint64_t rc, const void *arg)
189 const struct dummy_offset *dft;
190 struct dummy_offset dfe;
194 memset(&dfe, 0, sizeof(dfe));
/* sign-extended TEST_FILL_1 mirrors what the BPF_ST immediates wrote */
195 dfe.u64 = (int32_t)TEST_FILL_1;
200 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
203 /* store register test-cases */
/*
 * test_store2: store-register (BPF_STX) test case.
 * Loads an immediate into r2, then stores r2 into the dummy_offset
 * struct pointed to by r1 at each width, and returns a constant.
 * Shares prepare/check logic with the store1 case (not visible here).
 * NOTE(review): .imm values and struct braces are missing from this
 * extract - verify against the full source.
 */
204 static const struct ebpf_insn test_store2_prog[] = {
/* r2 = imm (64-bit mov) */
207 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
208 .dst_reg = EBPF_REG_2,
/* *(uint8_t *)(r1 + off8) = r2 */
212 .code = (BPF_STX | BPF_MEM | BPF_B),
213 .dst_reg = EBPF_REG_1,
214 .src_reg = EBPF_REG_2,
215 .off = offsetof(struct dummy_offset, u8),
/* *(uint16_t *)(r1 + off16) = r2 */
218 .code = (BPF_STX | BPF_MEM | BPF_H),
219 .dst_reg = EBPF_REG_1,
220 .src_reg = EBPF_REG_2,
221 .off = offsetof(struct dummy_offset, u16),
/* *(uint32_t *)(r1 + off32) = r2 */
224 .code = (BPF_STX | BPF_MEM | BPF_W),
225 .dst_reg = EBPF_REG_1,
226 .src_reg = EBPF_REG_2,
227 .off = offsetof(struct dummy_offset, u32),
/* *(uint64_t *)(r1 + off64) = r2 */
230 .code = (BPF_STX | BPF_MEM | EBPF_DW),
231 .dst_reg = EBPF_REG_1,
232 .src_reg = EBPF_REG_2,
233 .off = offsetof(struct dummy_offset, u64),
/* set return value (r0) and exit */
237 .code = (BPF_ALU | EBPF_MOV | BPF_K),
238 .dst_reg = EBPF_REG_0,
242 .code = (BPF_JMP | EBPF_EXIT),
246 /* load test-cases */
/*
 * test_load1: load (BPF_LDX) test case.
 * Loads each field of the dummy_offset struct (r1) at its natural width
 * into separate registers, sums them into r0, and exits.
 * NOTE(review): struct braces and some statements are missing from this
 * extract - verify against the full source.
 */
247 static const struct ebpf_insn test_load1_prog[] = {
/* r2 = *(uint8_t *)(r1 + off8) */
250 .code = (BPF_LDX | BPF_MEM | BPF_B),
251 .dst_reg = EBPF_REG_2,
252 .src_reg = EBPF_REG_1,
253 .off = offsetof(struct dummy_offset, u8),
/* r3 = *(uint16_t *)(r1 + off16) */
256 .code = (BPF_LDX | BPF_MEM | BPF_H),
257 .dst_reg = EBPF_REG_3,
258 .src_reg = EBPF_REG_1,
259 .off = offsetof(struct dummy_offset, u16),
/* r4 = *(uint32_t *)(r1 + off32) */
262 .code = (BPF_LDX | BPF_MEM | BPF_W),
263 .dst_reg = EBPF_REG_4,
264 .src_reg = EBPF_REG_1,
265 .off = offsetof(struct dummy_offset, u32),
/* r0 = *(uint64_t *)(r1 + off64) */
268 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
269 .dst_reg = EBPF_REG_0,
270 .src_reg = EBPF_REG_1,
271 .off = offsetof(struct dummy_offset, u64),
/* r0 += r4 + r3 + r2: accumulate all loaded fields */
275 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
276 .dst_reg = EBPF_REG_0,
277 .src_reg = EBPF_REG_4,
280 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
281 .dst_reg = EBPF_REG_0,
282 .src_reg = EBPF_REG_3,
285 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
286 .dst_reg = EBPF_REG_0,
287 .src_reg = EBPF_REG_2,
290 .code = (BPF_JMP | EBPF_EXIT),
/* fill the input struct with known values before the program runs */
295 test_load1_prepare(void *arg)
297 struct dummy_offset *df;
301 memset(df, 0, sizeof(*df));
302 df->u64 = (int32_t)TEST_FILL_1;
/*
 * Recompute the expected sum of the struct fields on the host side
 * and compare with the program's return value.
 */
309 test_load1_check(uint64_t rc, const void *arg)
312 const struct dummy_offset *dft;
320 return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
323 /* load immediate test-cases */
/*
 * test_ldimm1: 64-bit load-immediate (BPF_LD | BPF_IMM | EBPF_DW) test.
 * Each 64-bit immediate occupies two instruction slots: the low 32 bits
 * in the first .imm, the high 32 bits in the following pseudo-insn's
 * .imm.  Five boundary constants (TEST_IMM_1..5) are loaded into
 * r0/r3/r5/r7/r9, summed into r0, and returned.
 * NOTE(review): struct braces are missing from this extract - verify
 * against the full source.
 */
324 static const struct ebpf_insn test_ldimm1_prog[] = {
/* r0 = TEST_IMM_1 (low word, then high word in next slot) */
327 .code = (BPF_LD | BPF_IMM | EBPF_DW),
328 .dst_reg = EBPF_REG_0,
329 .imm = (uint32_t)TEST_IMM_1,
332 .imm = TEST_IMM_1 >> 32,
/* r3 = TEST_IMM_2 */
335 .code = (BPF_LD | BPF_IMM | EBPF_DW),
336 .dst_reg = EBPF_REG_3,
337 .imm = (uint32_t)TEST_IMM_2,
340 .imm = TEST_IMM_2 >> 32,
/* r5 = TEST_IMM_3 */
343 .code = (BPF_LD | BPF_IMM | EBPF_DW),
344 .dst_reg = EBPF_REG_5,
345 .imm = (uint32_t)TEST_IMM_3,
348 .imm = TEST_IMM_3 >> 32,
/* r7 = TEST_IMM_4 */
351 .code = (BPF_LD | BPF_IMM | EBPF_DW),
352 .dst_reg = EBPF_REG_7,
353 .imm = (uint32_t)TEST_IMM_4,
356 .imm = TEST_IMM_4 >> 32,
/* r9 = TEST_IMM_5 */
359 .code = (BPF_LD | BPF_IMM | EBPF_DW),
360 .dst_reg = EBPF_REG_9,
361 .imm = (uint32_t)TEST_IMM_5,
364 .imm = TEST_IMM_5 >> 32,
/* r0 += r3 + r5 + r7 + r9: fold all immediates into the return value */
368 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
369 .dst_reg = EBPF_REG_0,
370 .src_reg = EBPF_REG_3,
373 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
374 .dst_reg = EBPF_REG_0,
375 .src_reg = EBPF_REG_5,
378 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
379 .dst_reg = EBPF_REG_0,
380 .src_reg = EBPF_REG_7,
383 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
384 .dst_reg = EBPF_REG_0,
385 .src_reg = EBPF_REG_9,
388 .code = (BPF_JMP | EBPF_EXIT),
/*
 * Expected value is the host-side sum of the same constants; no result
 * memory to compare (res_sz == 0), only the return code.
 */
393 test_ldimm1_check(uint64_t rc, const void *arg)
407 return cmp_res(__func__, v1, rc, arg, arg, 0);
411 /* alu mul test-cases */
/*
 * test_mul1: ALU multiply test case (32-bit and 64-bit, imm and reg).
 * Loads three inputs from dummy_vect8, multiplies them in various
 * widths, stores the products into out[0..2], and returns 1.
 * NOTE(review): .imm values and struct braces are missing from this
 * extract - verify against the full source.
 */
412 static const struct ebpf_insn test_mul1_prog[] = {
/* r2 = in[0].u32, r3 = in[1].u64, r4 = in[2].u32 */
415 .code = (BPF_LDX | BPF_MEM | BPF_W),
416 .dst_reg = EBPF_REG_2,
417 .src_reg = EBPF_REG_1,
418 .off = offsetof(struct dummy_vect8, in[0].u32),
421 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
422 .dst_reg = EBPF_REG_3,
423 .src_reg = EBPF_REG_1,
424 .off = offsetof(struct dummy_vect8, in[1].u64),
427 .code = (BPF_LDX | BPF_MEM | BPF_W),
428 .dst_reg = EBPF_REG_4,
429 .src_reg = EBPF_REG_1,
430 .off = offsetof(struct dummy_vect8, in[2].u32),
/* r2 = (uint32_t)r2 * imm - 32-bit multiply by immediate */
433 .code = (BPF_ALU | BPF_MUL | BPF_K),
434 .dst_reg = EBPF_REG_2,
/* r3 *= imm - 64-bit multiply by immediate */
438 .code = (EBPF_ALU64 | BPF_MUL | BPF_K),
439 .dst_reg = EBPF_REG_3,
/* r4 = (uint32_t)(r4 * r2) - 32-bit multiply by register */
443 .code = (BPF_ALU | BPF_MUL | BPF_X),
444 .dst_reg = EBPF_REG_4,
445 .src_reg = EBPF_REG_2,
/* r4 *= r3 - 64-bit multiply by register */
448 .code = (EBPF_ALU64 | BPF_MUL | BPF_X),
449 .dst_reg = EBPF_REG_4,
450 .src_reg = EBPF_REG_3,
/* store the three products into out[0..2].u64 */
453 .code = (BPF_STX | BPF_MEM | EBPF_DW),
454 .dst_reg = EBPF_REG_1,
455 .src_reg = EBPF_REG_2,
456 .off = offsetof(struct dummy_vect8, out[0].u64),
459 .code = (BPF_STX | BPF_MEM | EBPF_DW),
460 .dst_reg = EBPF_REG_1,
461 .src_reg = EBPF_REG_3,
462 .off = offsetof(struct dummy_vect8, out[1].u64),
465 .code = (BPF_STX | BPF_MEM | EBPF_DW),
466 .dst_reg = EBPF_REG_1,
467 .src_reg = EBPF_REG_4,
468 .off = offsetof(struct dummy_vect8, out[2].u64),
/* set return value (r0) and exit */
472 .code = (BPF_ALU | EBPF_MOV | BPF_K),
473 .dst_reg = EBPF_REG_0,
477 .code = (BPF_JMP | EBPF_EXIT),
/* seed the input vector with pseudo-random-derived values */
482 test_mul1_prepare(void *arg)
484 struct dummy_vect8 *dv;
491 memset(dv, 0, sizeof(*dv));
/* in[1] is a mixed-shift of v to exercise 64-bit operands */
493 dv->in[1].u64 = v << 12 | v >> 6;
/*
 * Replay the same multiplies on the host (mirroring the 32-bit
 * truncation semantics) and compare the out[] array.
 */
498 test_mul1_check(uint64_t rc, const void *arg)
501 const struct dummy_vect8 *dvt;
502 struct dummy_vect8 dve;
505 memset(&dve, 0, sizeof(dve));
511 r2 = (uint32_t)r2 * TEST_MUL_1;
513 r4 = (uint32_t)(r4 * r2);
520 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
523 /* alu shift test-cases */
/*
 * test_shift1: ALU shift test case (LSH/RSH/ARSH, imm and reg forms,
 * 32-bit and 64-bit).  Shift counts taken from registers are first
 * masked with TEST_SHIFT32_MASK/TEST_SHIFT64_MASK so they stay within
 * the operand width.  Results land in out[0..5]; returns 1.
 * NOTE(review): .imm values and struct braces are missing from this
 * extract - verify against the full source.
 */
524 static const struct ebpf_insn test_shift1_prog[] = {
/* r2 = in[0].u32, r3 = in[1].u64, r4 = in[2].u32 */
527 .code = (BPF_LDX | BPF_MEM | BPF_W),
528 .dst_reg = EBPF_REG_2,
529 .src_reg = EBPF_REG_1,
530 .off = offsetof(struct dummy_vect8, in[0].u32),
533 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
534 .dst_reg = EBPF_REG_3,
535 .src_reg = EBPF_REG_1,
536 .off = offsetof(struct dummy_vect8, in[1].u64),
539 .code = (BPF_LDX | BPF_MEM | BPF_W),
540 .dst_reg = EBPF_REG_4,
541 .src_reg = EBPF_REG_1,
542 .off = offsetof(struct dummy_vect8, in[2].u32),
/* r2 = (uint32_t)r2 << imm - 32-bit left shift by immediate */
545 .code = (BPF_ALU | BPF_LSH | BPF_K),
546 .dst_reg = EBPF_REG_2,
/* r3 = (int64_t)r3 >> imm - 64-bit arithmetic right shift */
550 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
551 .dst_reg = EBPF_REG_3,
/* store first pair of results */
555 .code = (BPF_STX | BPF_MEM | EBPF_DW),
556 .dst_reg = EBPF_REG_1,
557 .src_reg = EBPF_REG_2,
558 .off = offsetof(struct dummy_vect8, out[0].u64),
561 .code = (BPF_STX | BPF_MEM | EBPF_DW),
562 .dst_reg = EBPF_REG_1,
563 .src_reg = EBPF_REG_3,
564 .off = offsetof(struct dummy_vect8, out[1].u64),
/* mask r4 to a valid 64-bit shift count, then r3 <<= r4 */
567 .code = (BPF_ALU | BPF_AND | BPF_K),
568 .dst_reg = EBPF_REG_4,
569 .imm = TEST_SHIFT64_MASK,
572 .code = (EBPF_ALU64 | BPF_LSH | BPF_X),
573 .dst_reg = EBPF_REG_3,
574 .src_reg = EBPF_REG_4,
/* mask r4 to a valid 32-bit shift count, then r2 >>= r4 (logical) */
577 .code = (BPF_ALU | BPF_AND | BPF_K),
578 .dst_reg = EBPF_REG_4,
579 .imm = TEST_SHIFT32_MASK,
582 .code = (BPF_ALU | BPF_RSH | BPF_X),
583 .dst_reg = EBPF_REG_2,
584 .src_reg = EBPF_REG_4,
/* store second pair of results */
587 .code = (BPF_STX | BPF_MEM | EBPF_DW),
588 .dst_reg = EBPF_REG_1,
589 .src_reg = EBPF_REG_2,
590 .off = offsetof(struct dummy_vect8, out[2].u64),
593 .code = (BPF_STX | BPF_MEM | EBPF_DW),
594 .dst_reg = EBPF_REG_1,
595 .src_reg = EBPF_REG_3,
596 .off = offsetof(struct dummy_vect8, out[3].u64),
/* reload the original inputs for the register-count ARSH/LSH pass */
599 .code = (BPF_LDX | BPF_MEM | BPF_W),
600 .dst_reg = EBPF_REG_2,
601 .src_reg = EBPF_REG_1,
602 .off = offsetof(struct dummy_vect8, in[0].u32),
605 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
606 .dst_reg = EBPF_REG_3,
607 .src_reg = EBPF_REG_1,
608 .off = offsetof(struct dummy_vect8, in[1].u64),
611 .code = (BPF_LDX | BPF_MEM | BPF_W),
612 .dst_reg = EBPF_REG_4,
613 .src_reg = EBPF_REG_1,
614 .off = offsetof(struct dummy_vect8, in[2].u32),
/* r2 &= 64-bit mask, then r3 = (int64_t)r3 >> r2 */
617 .code = (BPF_ALU | BPF_AND | BPF_K),
618 .dst_reg = EBPF_REG_2,
619 .imm = TEST_SHIFT64_MASK,
622 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
623 .dst_reg = EBPF_REG_3,
624 .src_reg = EBPF_REG_2,
/* r2 &= 32-bit mask, then r4 = (uint32_t)r4 << r2 */
627 .code = (BPF_ALU | BPF_AND | BPF_K),
628 .dst_reg = EBPF_REG_2,
629 .imm = TEST_SHIFT32_MASK,
632 .code = (BPF_ALU | BPF_LSH | BPF_X),
633 .dst_reg = EBPF_REG_4,
634 .src_reg = EBPF_REG_2,
/* store final pair of results */
637 .code = (BPF_STX | BPF_MEM | EBPF_DW),
638 .dst_reg = EBPF_REG_1,
639 .src_reg = EBPF_REG_4,
640 .off = offsetof(struct dummy_vect8, out[4].u64),
643 .code = (BPF_STX | BPF_MEM | EBPF_DW),
644 .dst_reg = EBPF_REG_1,
645 .src_reg = EBPF_REG_3,
646 .off = offsetof(struct dummy_vect8, out[5].u64),
/* set return value (r0) and exit */
650 .code = (BPF_ALU | EBPF_MOV | BPF_K),
651 .dst_reg = EBPF_REG_0,
655 .code = (BPF_JMP | EBPF_EXIT),
/* seed the input vector; in[2] gets a negated/xored value for counts */
660 test_shift1_prepare(void *arg)
662 struct dummy_vect8 *dv;
669 memset(dv, 0, sizeof(*dv));
671 dv->in[1].u64 = v << 12 | v >> 6;
672 dv->in[2].u32 = (-v ^ 5);
/*
 * Replay the same shift sequence on the host, including the shift-count
 * masking, and compare all six out[] slots.
 */
676 test_shift1_check(uint64_t rc, const void *arg)
679 const struct dummy_vect8 *dvt;
680 struct dummy_vect8 dve;
683 memset(&dve, 0, sizeof(dve));
689 r2 = (uint32_t)r2 << TEST_SHIFT_1;
690 r3 = (int64_t)r3 >> TEST_SHIFT_2;
695 r4 &= TEST_SHIFT64_MASK;
697 r4 &= TEST_SHIFT32_MASK;
698 r2 = (uint32_t)r2 >> r4;
707 r2 &= TEST_SHIFT64_MASK;
708 r3 = (int64_t)r3 >> r2;
709 r2 &= TEST_SHIFT32_MASK;
710 r4 = (uint32_t)r4 << r2;
715 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
/*
 * test_jump1: conditional jump test case.
 * Loads four operands from dummy_vect8, runs a chain of conditional
 * jumps (JEQ/JSLE/JGT/JSET/JNE/JSGT/JLE, both imm and reg forms); each
 * taken branch ORs a distinct bit into r0 before jumping back, so the
 * return value encodes exactly which comparisons were true.
 * NOTE(review): .imm values, jump offsets and struct braces are missing
 * from this extract - verify against the full source.
 */
719 static const struct ebpf_insn test_jump1_prog[] = {
/* r0 = 0 - accumulator for branch-taken flags */
722 .code = (BPF_ALU | EBPF_MOV | BPF_K),
723 .dst_reg = EBPF_REG_0,
/* r2 = in[0].u32, r3 = in[0].u64, r4 = in[1].u32, r5 = in[1].u64 */
727 .code = (BPF_LDX | BPF_MEM | BPF_W),
728 .dst_reg = EBPF_REG_2,
729 .src_reg = EBPF_REG_1,
730 .off = offsetof(struct dummy_vect8, in[0].u32),
733 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
734 .dst_reg = EBPF_REG_3,
735 .src_reg = EBPF_REG_1,
736 .off = offsetof(struct dummy_vect8, in[0].u64),
739 .code = (BPF_LDX | BPF_MEM | BPF_W),
740 .dst_reg = EBPF_REG_4,
741 .src_reg = EBPF_REG_1,
742 .off = offsetof(struct dummy_vect8, in[1].u32),
745 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
746 .dst_reg = EBPF_REG_5,
747 .src_reg = EBPF_REG_1,
748 .off = offsetof(struct dummy_vect8, in[1].u64),
/* immediate-operand conditional jumps */
751 .code = (BPF_JMP | BPF_JEQ | BPF_K),
752 .dst_reg = EBPF_REG_2,
757 .code = (BPF_JMP | EBPF_JSLE | BPF_K),
758 .dst_reg = EBPF_REG_3,
763 .code = (BPF_JMP | BPF_JGT | BPF_K),
764 .dst_reg = EBPF_REG_4,
769 .code = (BPF_JMP | BPF_JSET | BPF_K),
770 .dst_reg = EBPF_REG_5,
/* register-operand conditional jumps */
775 .code = (BPF_JMP | EBPF_JNE | BPF_X),
776 .dst_reg = EBPF_REG_2,
777 .src_reg = EBPF_REG_3,
781 .code = (BPF_JMP | EBPF_JSGT | BPF_X),
782 .dst_reg = EBPF_REG_2,
783 .src_reg = EBPF_REG_4,
787 .code = (BPF_JMP | EBPF_JLE | BPF_X),
788 .dst_reg = EBPF_REG_2,
789 .src_reg = EBPF_REG_5,
793 .code = (BPF_JMP | BPF_JSET | BPF_X),
794 .dst_reg = EBPF_REG_3,
795 .src_reg = EBPF_REG_5,
799 .code = (BPF_JMP | EBPF_EXIT),
/*
 * Branch targets: each ORs a flag bit into r0, then jumps back
 * (BPF_JA) into the comparison chain.
 */
802 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
803 .dst_reg = EBPF_REG_0,
807 .code = (BPF_JMP | BPF_JA),
811 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
812 .dst_reg = EBPF_REG_0,
816 .code = (BPF_JMP | BPF_JA),
820 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
821 .dst_reg = EBPF_REG_0,
825 .code = (BPF_JMP | BPF_JA),
829 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
830 .dst_reg = EBPF_REG_0,
834 .code = (BPF_JMP | BPF_JA),
838 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
839 .dst_reg = EBPF_REG_0,
843 .code = (BPF_JMP | BPF_JA),
847 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
848 .dst_reg = EBPF_REG_0,
852 .code = (BPF_JMP | BPF_JA),
856 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
857 .dst_reg = EBPF_REG_0,
861 .code = (BPF_JMP | BPF_JA),
865 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
866 .dst_reg = EBPF_REG_0,
870 .code = (BPF_JMP | BPF_JA),
/* seed in[0]/in[1] with values derived from two random words */
876 test_jump1_prepare(void *arg)
878 struct dummy_vect8 *dv;
886 memset(dv, 0, sizeof(*dv));
889 dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
890 dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
/*
 * Recompute the expected flag word rv on the host by evaluating the
 * same comparisons, and compare it with the program's return value.
 */
894 test_jump1_check(uint64_t rc, const void *arg)
896 uint64_t r2, r3, r4, r5, rv;
897 const struct dummy_vect8 *dvt;
907 if (r2 == TEST_JCC_1)
909 if ((int64_t)r3 <= TEST_JCC_2)
917 if ((int64_t)r2 > (int64_t)r4)
924 return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
927 /* Jump test case - check ip4_dest in particular subnet */
/*
 * test_jump2: jump test over packet-like data - checks whether the
 * IPv4 destination address of an (optionally VLAN-tagged) Ethernet
 * frame falls inside TEST_SUBNET/TEST_NETMASK.  Returns a flag value
 * set per the match result.
 * NOTE(review): .imm values, jump offsets and struct braces are missing
 * from this extract - verify against the full source.
 */
928 static const struct ebpf_insn test_jump2_prog[] = {
/* r2 = offset adjustment (no VLAN case) */
931 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
932 .dst_reg = EBPF_REG_2,
/* r3 = ether_type (16-bit load from the frame in r1) */
936 .code = (BPF_LDX | BPF_MEM | BPF_H),
937 .dst_reg = EBPF_REG_3,
938 .src_reg = EBPF_REG_1,
/* if ether_type != VLAN, skip the VLAN-handling path */
942 .code = (BPF_JMP | EBPF_JNE | BPF_K),
943 .dst_reg = EBPF_REG_3,
/* VLAN present: r2 = larger offset adjustment */
948 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
949 .dst_reg = EBPF_REG_2,
/* r3 = inner ether_type from the VLAN header */
953 .code = (BPF_LDX | BPF_MEM | BPF_H),
954 .dst_reg = EBPF_REG_3,
955 .src_reg = EBPF_REG_1,
959 .code = (EBPF_ALU64 | BPF_AND | BPF_K),
960 .dst_reg = EBPF_REG_3,
/* if the (masked) ether_type is not IPv4, bail out */
964 .code = (BPF_JMP | EBPF_JNE | BPF_K),
965 .dst_reg = EBPF_REG_3,
/* r1 += r2 - advance frame pointer past L2 headers */
970 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
971 .dst_reg = EBPF_REG_1,
972 .src_reg = EBPF_REG_2,
/* preset r0 return value */
975 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
976 .dst_reg = EBPF_REG_0,
/* r1 = 32-bit IPv4 dst address */
980 .code = (BPF_LDX | BPF_MEM | BPF_W),
981 .dst_reg = EBPF_REG_1,
982 .src_reg = EBPF_REG_1,
/* r3 = netmask, byte-swapped to big-endian for the comparison */
986 .code = (BPF_ALU | EBPF_MOV | BPF_K),
987 .dst_reg = EBPF_REG_3,
991 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
992 .dst_reg = EBPF_REG_3,
993 .imm = sizeof(uint32_t) * CHAR_BIT,
/* r1 &= netmask */
996 .code = (BPF_ALU | BPF_AND | BPF_X),
997 .dst_reg = EBPF_REG_1,
998 .src_reg = EBPF_REG_3,
/* r3 = subnet, byte-swapped to big-endian */
1001 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1002 .dst_reg = EBPF_REG_3,
1006 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1007 .dst_reg = EBPF_REG_3,
1008 .imm = sizeof(uint32_t) * CHAR_BIT,
/* if masked address equals the subnet, take the match branch */
1011 .code = (BPF_JMP | BPF_JEQ | BPF_X),
1012 .dst_reg = EBPF_REG_1,
1013 .src_reg = EBPF_REG_3,
/* no-match return value */
1017 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1018 .dst_reg = EBPF_REG_0,
1022 .code = (BPF_JMP | EBPF_EXIT),
/* Preparing a vlan packet: build eth + vlan + ipv4 headers in-place */
1028 test_jump2_prepare(void *arg)
1030 struct dummy_net *dn;
1033 memset(dn, 0, sizeof(*dn));
1036 * Initialize ether header.
1038 rte_ether_addr_copy((struct rte_ether_addr *)dst_mac,
1039 &dn->eth_hdr.dst_addr);
1040 rte_ether_addr_copy((struct rte_ether_addr *)src_mac,
1041 &dn->eth_hdr.src_addr);
1042 dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1045 * Initialize vlan header.
1047 dn->vlan_hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1048 dn->vlan_hdr.vlan_tci = 32;
1051 * Initialize IP header.
1053 dn->ip_hdr.version_ihl = 0x45; /*IP_VERSION | IP_HDRLEN*/
1054 dn->ip_hdr.time_to_live = 64; /* IP_DEFTTL */
1055 dn->ip_hdr.next_proto_id = IPPROTO_TCP;
1056 dn->ip_hdr.packet_id = rte_cpu_to_be_16(0x463c);
1057 dn->ip_hdr.total_length = rte_cpu_to_be_16(60);
1058 dn->ip_hdr.src_addr = rte_cpu_to_be_32(ip_src_addr);
1059 dn->ip_hdr.dst_addr = rte_cpu_to_be_32(ip_dst_addr);
/*
 * Host-side reference: parse the frame (skipping an optional 802.1Q
 * tag, ether_type 0x8100), apply the netmask to the IPv4 dst address,
 * and compare the match result with the program's return code.
 */
1063 test_jump2_check(uint64_t rc, const void *arg)
1065 const struct rte_ether_hdr *eth_hdr = arg;
1066 const struct rte_ipv4_hdr *ipv4_hdr;
1067 const void *next = eth_hdr;
1071 if (eth_hdr->ether_type == htons(0x8100)) {
1072 const struct rte_vlan_hdr *vlan_hdr =
1073 (const void *)(eth_hdr + 1);
1074 eth_type = vlan_hdr->eth_proto;
1075 next = vlan_hdr + 1;
1077 eth_type = eth_hdr->ether_type;
/* 0x0800 == IPv4 ether_type */
1081 if (eth_type == htons(0x0800)) {
1083 if ((ipv4_hdr->dst_addr & rte_cpu_to_be_32(TEST_NETMASK)) ==
1084 rte_cpu_to_be_32(TEST_SUBNET)) {
1089 return cmp_res(__func__, v, rc, arg, arg, sizeof(arg));
1092 /* alu (add, sub, and, or, xor, neg) test-cases */
/*
 * test_alu1: ALU add/sub/and/or/xor/neg test case (32- and 64-bit,
 * imm and reg forms).  Results of each stage are stored in out[0..7];
 * the return value is (-r2) + (-r3) from the final negation stage.
 * NOTE(review): .imm values and struct braces are missing from this
 * extract - verify against the full source.
 */
1093 static const struct ebpf_insn test_alu1_prog[] = {
/* r2 = in[0].u32, r3 = in[0].u64, r4 = in[1].u32, r5 = in[1].u64 */
1096 .code = (BPF_LDX | BPF_MEM | BPF_W),
1097 .dst_reg = EBPF_REG_2,
1098 .src_reg = EBPF_REG_1,
1099 .off = offsetof(struct dummy_vect8, in[0].u32),
1102 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1103 .dst_reg = EBPF_REG_3,
1104 .src_reg = EBPF_REG_1,
1105 .off = offsetof(struct dummy_vect8, in[0].u64),
1108 .code = (BPF_LDX | BPF_MEM | BPF_W),
1109 .dst_reg = EBPF_REG_4,
1110 .src_reg = EBPF_REG_1,
1111 .off = offsetof(struct dummy_vect8, in[1].u32),
1114 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1115 .dst_reg = EBPF_REG_5,
1116 .src_reg = EBPF_REG_1,
1117 .off = offsetof(struct dummy_vect8, in[1].u64),
/* immediate-operand stage: and/or/xor/add */
1120 .code = (BPF_ALU | BPF_AND | BPF_K),
1121 .dst_reg = EBPF_REG_2,
1125 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1126 .dst_reg = EBPF_REG_3,
1130 .code = (BPF_ALU | BPF_XOR | BPF_K),
1131 .dst_reg = EBPF_REG_4,
1135 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1136 .dst_reg = EBPF_REG_5,
/* store stage-1 results into out[0..3] */
1140 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1141 .dst_reg = EBPF_REG_1,
1142 .src_reg = EBPF_REG_2,
1143 .off = offsetof(struct dummy_vect8, out[0].u64),
1146 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1147 .dst_reg = EBPF_REG_1,
1148 .src_reg = EBPF_REG_3,
1149 .off = offsetof(struct dummy_vect8, out[1].u64),
1152 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1153 .dst_reg = EBPF_REG_1,
1154 .src_reg = EBPF_REG_4,
1155 .off = offsetof(struct dummy_vect8, out[2].u64),
1158 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1159 .dst_reg = EBPF_REG_1,
1160 .src_reg = EBPF_REG_5,
1161 .off = offsetof(struct dummy_vect8, out[3].u64),
/* register-operand stage: or/xor/sub/and between r2..r5 */
1164 .code = (BPF_ALU | BPF_OR | BPF_X),
1165 .dst_reg = EBPF_REG_2,
1166 .src_reg = EBPF_REG_3,
1169 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
1170 .dst_reg = EBPF_REG_3,
1171 .src_reg = EBPF_REG_4,
1174 .code = (BPF_ALU | BPF_SUB | BPF_X),
1175 .dst_reg = EBPF_REG_4,
1176 .src_reg = EBPF_REG_5,
1179 .code = (EBPF_ALU64 | BPF_AND | BPF_X),
1180 .dst_reg = EBPF_REG_5,
1181 .src_reg = EBPF_REG_2,
/* store stage-2 results into out[4..7] */
1184 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1185 .dst_reg = EBPF_REG_1,
1186 .src_reg = EBPF_REG_2,
1187 .off = offsetof(struct dummy_vect8, out[4].u64),
1190 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1191 .dst_reg = EBPF_REG_1,
1192 .src_reg = EBPF_REG_3,
1193 .off = offsetof(struct dummy_vect8, out[5].u64),
1196 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1197 .dst_reg = EBPF_REG_1,
1198 .src_reg = EBPF_REG_4,
1199 .off = offsetof(struct dummy_vect8, out[6].u64),
1202 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1203 .dst_reg = EBPF_REG_1,
1204 .src_reg = EBPF_REG_5,
1205 .off = offsetof(struct dummy_vect8, out[7].u64),
1207 /* return (-r2 + (-r3)) */
1209 .code = (BPF_ALU | BPF_NEG),
1210 .dst_reg = EBPF_REG_2,
1213 .code = (EBPF_ALU64 | BPF_NEG),
1214 .dst_reg = EBPF_REG_3,
1217 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1218 .dst_reg = EBPF_REG_2,
1219 .src_reg = EBPF_REG_3,
1222 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1223 .dst_reg = EBPF_REG_0,
1224 .src_reg = EBPF_REG_2,
1227 .code = (BPF_JMP | EBPF_EXIT),
/*
 * Host-side reference: replay both ALU stages (mirroring the 32-bit
 * truncation of the BPF_ALU ops) and compare out[] plus the computed
 * return value rv.
 */
1232 test_alu1_check(uint64_t rc, const void *arg)
1234 uint64_t r2, r3, r4, r5, rv;
1235 const struct dummy_vect8 *dvt;
1236 struct dummy_vect8 dve;
1239 memset(&dve, 0, sizeof(dve));
1241 r2 = dvt->in[0].u32;
1242 r3 = dvt->in[0].u64;
1243 r4 = dvt->in[1].u32;
1244 r5 = dvt->in[1].u64;
/* stage 1: immediate operands */
1246 r2 = (uint32_t)r2 & TEST_FILL_1;
1247 r3 |= (int32_t) TEST_FILL_1;
1248 r4 = (uint32_t)r4 ^ TEST_FILL_1;
1249 r5 += (int32_t)TEST_FILL_1;
1251 dve.out[0].u64 = r2;
1252 dve.out[1].u64 = r3;
1253 dve.out[2].u64 = r4;
1254 dve.out[3].u64 = r5;
/* stage 2: register operands */
1256 r2 = (uint32_t)r2 | (uint32_t)r3;
1258 r4 = (uint32_t)r4 - (uint32_t)r5;
1261 dve.out[4].u64 = r2;
1262 dve.out[5].u64 = r3;
1263 dve.out[6].u64 = r4;
1264 dve.out[7].u64 = r5;
1271 return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
1274 /* endianness conversions (BE->LE/LE->BE) test-cases */
/*
 * test_bele1: endianness conversion (EBPF_END, TO_BE/TO_LE) test case.
 * Loads in[0] at 16/32/64-bit widths, converts to big-endian and stores
 * in out[0..2], then reloads and converts to little-endian into
 * out[3..5]; returns 1.
 * NOTE(review): struct braces are missing from this extract - verify
 * against the full source.
 */
1275 static const struct ebpf_insn test_bele1_prog[] = {
/* r2 = in[0].u16, r3 = in[0].u32, r4 = in[0].u64 */
1278 .code = (BPF_LDX | BPF_MEM | BPF_H),
1279 .dst_reg = EBPF_REG_2,
1280 .src_reg = EBPF_REG_1,
1281 .off = offsetof(struct dummy_vect8, in[0].u16),
1284 .code = (BPF_LDX | BPF_MEM | BPF_W),
1285 .dst_reg = EBPF_REG_3,
1286 .src_reg = EBPF_REG_1,
1287 .off = offsetof(struct dummy_vect8, in[0].u32),
1290 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1291 .dst_reg = EBPF_REG_4,
1292 .src_reg = EBPF_REG_1,
1293 .off = offsetof(struct dummy_vect8, in[0].u64),
/* convert each register to big-endian at its width (imm = bit count) */
1296 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1297 .dst_reg = EBPF_REG_2,
1298 .imm = sizeof(uint16_t) * CHAR_BIT,
1301 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1302 .dst_reg = EBPF_REG_3,
1303 .imm = sizeof(uint32_t) * CHAR_BIT,
1306 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1307 .dst_reg = EBPF_REG_4,
1308 .imm = sizeof(uint64_t) * CHAR_BIT,
/* store big-endian results into out[0..2] */
1311 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1312 .dst_reg = EBPF_REG_1,
1313 .src_reg = EBPF_REG_2,
1314 .off = offsetof(struct dummy_vect8, out[0].u64),
1317 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1318 .dst_reg = EBPF_REG_1,
1319 .src_reg = EBPF_REG_3,
1320 .off = offsetof(struct dummy_vect8, out[1].u64),
1323 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1324 .dst_reg = EBPF_REG_1,
1325 .src_reg = EBPF_REG_4,
1326 .off = offsetof(struct dummy_vect8, out[2].u64),
/* reload the same inputs for the little-endian pass */
1329 .code = (BPF_LDX | BPF_MEM | BPF_H),
1330 .dst_reg = EBPF_REG_2,
1331 .src_reg = EBPF_REG_1,
1332 .off = offsetof(struct dummy_vect8, in[0].u16),
1335 .code = (BPF_LDX | BPF_MEM | BPF_W),
1336 .dst_reg = EBPF_REG_3,
1337 .src_reg = EBPF_REG_1,
1338 .off = offsetof(struct dummy_vect8, in[0].u32),
1341 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1342 .dst_reg = EBPF_REG_4,
1343 .src_reg = EBPF_REG_1,
1344 .off = offsetof(struct dummy_vect8, in[0].u64),
/* convert each register to little-endian at its width */
1347 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1348 .dst_reg = EBPF_REG_2,
1349 .imm = sizeof(uint16_t) * CHAR_BIT,
1352 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1353 .dst_reg = EBPF_REG_3,
1354 .imm = sizeof(uint32_t) * CHAR_BIT,
1357 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1358 .dst_reg = EBPF_REG_4,
1359 .imm = sizeof(uint64_t) * CHAR_BIT,
/* store little-endian results into out[3..5] */
1362 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1363 .dst_reg = EBPF_REG_1,
1364 .src_reg = EBPF_REG_2,
1365 .off = offsetof(struct dummy_vect8, out[3].u64),
1368 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1369 .dst_reg = EBPF_REG_1,
1370 .src_reg = EBPF_REG_3,
1371 .off = offsetof(struct dummy_vect8, out[4].u64),
1374 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1375 .dst_reg = EBPF_REG_1,
1376 .src_reg = EBPF_REG_4,
1377 .off = offsetof(struct dummy_vect8, out[5].u64),
/* set return value (r0) and exit */
1381 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1382 .dst_reg = EBPF_REG_0,
1386 .code = (BPF_JMP | EBPF_EXIT),
/* seed in[0] with one random 64-bit value, truncated for u32/u16 */
1391 test_bele1_prepare(void *arg)
1393 struct dummy_vect8 *dv;
1397 memset(dv, 0, sizeof(*dv));
1398 dv->in[0].u64 = rte_rand();
1399 dv->in[0].u32 = dv->in[0].u64;
1400 dv->in[0].u16 = dv->in[0].u64;
/*
 * Host-side reference: apply rte_cpu_to_be/le at each width and
 * compare all six out[] slots; expected return code is 1.
 */
1404 test_bele1_check(uint64_t rc, const void *arg)
1406 uint64_t r2, r3, r4;
1407 const struct dummy_vect8 *dvt;
1408 struct dummy_vect8 dve;
1411 memset(&dve, 0, sizeof(dve));
1413 r2 = dvt->in[0].u16;
1414 r3 = dvt->in[0].u32;
1415 r4 = dvt->in[0].u64;
1417 r2 = rte_cpu_to_be_16(r2);
1418 r3 = rte_cpu_to_be_32(r3);
1419 r4 = rte_cpu_to_be_64(r4);
1421 dve.out[0].u64 = r2;
1422 dve.out[1].u64 = r3;
1423 dve.out[2].u64 = r4;
1425 r2 = dvt->in[0].u16;
1426 r3 = dvt->in[0].u32;
1427 r4 = dvt->in[0].u64;
1429 r2 = rte_cpu_to_le_16(r2);
1430 r3 = rte_cpu_to_le_32(r3);
1431 r4 = rte_cpu_to_le_64(r4);
1433 dve.out[3].u64 = r2;
1434 dve.out[4].u64 = r3;
1435 dve.out[5].u64 = r4;
1437 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
1440 /* atomic add test-cases */
/*
 * test_xadd1: atomic add (BPF_STX | EBPF_XADD) test case.
 * For each of seven source registers (r2..r8) loaded with different
 * immediates, atomically adds the register into both the u32 and u64
 * fields of the dummy_offset struct at r1; returns 1.
 * NOTE(review): .imm values and struct braces are missing from this
 * extract - verify against the full source.
 */
1441 static const struct ebpf_insn test_xadd1_prog[] = {
/* r2 = imm; *(u32/u64 *)(r1 + off) += r2 atomically */
1444 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1445 .dst_reg = EBPF_REG_2,
1449 .code = (BPF_STX | EBPF_XADD | BPF_W),
1450 .dst_reg = EBPF_REG_1,
1451 .src_reg = EBPF_REG_2,
1452 .off = offsetof(struct dummy_offset, u32),
1455 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1456 .dst_reg = EBPF_REG_1,
1457 .src_reg = EBPF_REG_2,
1458 .off = offsetof(struct dummy_offset, u64),
/* same pattern with r3 */
1461 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1462 .dst_reg = EBPF_REG_3,
1466 .code = (BPF_STX | EBPF_XADD | BPF_W),
1467 .dst_reg = EBPF_REG_1,
1468 .src_reg = EBPF_REG_3,
1469 .off = offsetof(struct dummy_offset, u32),
1472 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1473 .dst_reg = EBPF_REG_1,
1474 .src_reg = EBPF_REG_3,
1475 .off = offsetof(struct dummy_offset, u64),
/* same pattern with r4 */
1478 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1479 .dst_reg = EBPF_REG_4,
1483 .code = (BPF_STX | EBPF_XADD | BPF_W),
1484 .dst_reg = EBPF_REG_1,
1485 .src_reg = EBPF_REG_4,
1486 .off = offsetof(struct dummy_offset, u32),
1489 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1490 .dst_reg = EBPF_REG_1,
1491 .src_reg = EBPF_REG_4,
1492 .off = offsetof(struct dummy_offset, u64),
/* same pattern with r5 */
1495 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1496 .dst_reg = EBPF_REG_5,
1500 .code = (BPF_STX | EBPF_XADD | BPF_W),
1501 .dst_reg = EBPF_REG_1,
1502 .src_reg = EBPF_REG_5,
1503 .off = offsetof(struct dummy_offset, u32),
1506 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1507 .dst_reg = EBPF_REG_1,
1508 .src_reg = EBPF_REG_5,
1509 .off = offsetof(struct dummy_offset, u64),
/* same pattern with r6 */
1512 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1513 .dst_reg = EBPF_REG_6,
1517 .code = (BPF_STX | EBPF_XADD | BPF_W),
1518 .dst_reg = EBPF_REG_1,
1519 .src_reg = EBPF_REG_6,
1520 .off = offsetof(struct dummy_offset, u32),
1523 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1524 .dst_reg = EBPF_REG_1,
1525 .src_reg = EBPF_REG_6,
1526 .off = offsetof(struct dummy_offset, u64),
/* same pattern with r7 */
1529 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1530 .dst_reg = EBPF_REG_7,
1534 .code = (BPF_STX | EBPF_XADD | BPF_W),
1535 .dst_reg = EBPF_REG_1,
1536 .src_reg = EBPF_REG_7,
1537 .off = offsetof(struct dummy_offset, u32),
1540 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1541 .dst_reg = EBPF_REG_1,
1542 .src_reg = EBPF_REG_7,
1543 .off = offsetof(struct dummy_offset, u64),
/* same pattern with r8 */
1546 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1547 .dst_reg = EBPF_REG_8,
1551 .code = (BPF_STX | EBPF_XADD | BPF_W),
1552 .dst_reg = EBPF_REG_1,
1553 .src_reg = EBPF_REG_8,
1554 .off = offsetof(struct dummy_offset, u32),
1557 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1558 .dst_reg = EBPF_REG_1,
1559 .src_reg = EBPF_REG_8,
1560 .off = offsetof(struct dummy_offset, u64),
/* set return value (r0) and exit */
1564 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1565 .dst_reg = EBPF_REG_0,
1569 .code = (BPF_JMP | EBPF_EXIT),
/*
 * Host-side reference: apply the same seven additions with
 * __atomic_fetch_add and compare the whole struct.
 */
1574 test_xadd1_check(uint64_t rc, const void *arg)
1577 const struct dummy_offset *dft;
1578 struct dummy_offset dfe;
1581 memset(&dfe, 0, sizeof(dfe));
1584 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1585 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1588 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1589 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1591 rv = (int32_t)TEST_FILL_1;
1592 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1593 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1596 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1597 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1600 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1601 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1604 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1605 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1608 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1609 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1611 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
1614 /* alu div test-cases */
/*
 * test_div1: ALU divide/modulo test case, including graceful handling
 * of division by zero (the program is expected to return 0 because the
 * final DIV uses in[3].u32, which the prepare step leaves at 0).
 * NOTE(review): .imm values and struct braces are missing from this
 * extract - verify against the full source.
 */
1615 static const struct ebpf_insn test_div1_prog[] = {
/* r2 = in[0].u32, r3 = in[1].u64, r4 = in[2].u32 */
1618 .code = (BPF_LDX | BPF_MEM | BPF_W),
1619 .dst_reg = EBPF_REG_2,
1620 .src_reg = EBPF_REG_1,
1621 .off = offsetof(struct dummy_vect8, in[0].u32),
1624 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1625 .dst_reg = EBPF_REG_3,
1626 .src_reg = EBPF_REG_1,
1627 .off = offsetof(struct dummy_vect8, in[1].u64),
1630 .code = (BPF_LDX | BPF_MEM | BPF_W),
1631 .dst_reg = EBPF_REG_4,
1632 .src_reg = EBPF_REG_1,
1633 .off = offsetof(struct dummy_vect8, in[2].u32),
/* r2 = (uint32_t)r2 / imm; r3 %= imm (64-bit) */
1636 .code = (BPF_ALU | BPF_DIV | BPF_K),
1637 .dst_reg = EBPF_REG_2,
1641 .code = (EBPF_ALU64 | BPF_MOD | BPF_K),
1642 .dst_reg = EBPF_REG_3,
/* OR in nonzero bits so the register divisors below are never zero */
1646 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1647 .dst_reg = EBPF_REG_2,
1651 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1652 .dst_reg = EBPF_REG_3,
/* r4 = (uint32_t)r4 % r2; r4 /= r3 (64-bit) */
1656 .code = (BPF_ALU | BPF_MOD | BPF_X),
1657 .dst_reg = EBPF_REG_4,
1658 .src_reg = EBPF_REG_2,
1661 .code = (EBPF_ALU64 | BPF_DIV | BPF_X),
1662 .dst_reg = EBPF_REG_4,
1663 .src_reg = EBPF_REG_3,
/* store the three results into out[0..2].u64 */
1666 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1667 .dst_reg = EBPF_REG_1,
1668 .src_reg = EBPF_REG_2,
1669 .off = offsetof(struct dummy_vect8, out[0].u64),
1672 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1673 .dst_reg = EBPF_REG_1,
1674 .src_reg = EBPF_REG_3,
1675 .off = offsetof(struct dummy_vect8, out[1].u64),
1678 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1679 .dst_reg = EBPF_REG_1,
1680 .src_reg = EBPF_REG_4,
1681 .off = offsetof(struct dummy_vect8, out[2].u64),
1683 /* check that we can handle division by zero gracefully. */
1685 .code = (BPF_LDX | BPF_MEM | BPF_W),
1686 .dst_reg = EBPF_REG_2,
1687 .src_reg = EBPF_REG_1,
1688 .off = offsetof(struct dummy_vect8, in[3].u32),
/* r4 /= r2 where r2 == 0 - runtime must not crash */
1691 .code = (BPF_ALU | BPF_DIV | BPF_X),
1692 .dst_reg = EBPF_REG_4,
1693 .src_reg = EBPF_REG_2,
/* set return value (r0) and exit - unreachable after div-by-zero trap */
1697 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1698 .dst_reg = EBPF_REG_0,
1702 .code = (BPF_JMP | EBPF_EXIT),
/*
 * Host-side reference: replay the divide/modulo chain and compare
 * out[]; expected return code is 0 (see comment below).
 */
1707 test_div1_check(uint64_t rc, const void *arg)
1709 uint64_t r2, r3, r4;
1710 const struct dummy_vect8 *dvt;
1711 struct dummy_vect8 dve;
1714 memset(&dve, 0, sizeof(dve));
1716 r2 = dvt->in[0].u32;
1717 r3 = dvt->in[1].u64;
1718 r4 = dvt->in[2].u32;
1720 r2 = (uint32_t)r2 / TEST_MUL_1;
1724 r4 = (uint32_t)(r4 % r2);
1727 dve.out[0].u64 = r2;
1728 dve.out[1].u64 = r3;
1729 dve.out[2].u64 = r4;
1732 * in the test prog we attempted to divide by zero.
1733 * so return value should return 0.
1735 return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
1738 /* call test-cases */
/*
 * eBPF program for call test #1:
 * load the u32/u64 fields from the input dummy_offset, spill them to the
 * stack, call an external function with pointers into that stack area,
 * then reload both values and return their sum in R0.
 */
1739 static const struct ebpf_insn test_call1_prog[] = {
/* R2 = input->u32, R3 = input->u64 */
1742 .code = (BPF_LDX | BPF_MEM | BPF_W),
1743 .dst_reg = EBPF_REG_2,
1744 .src_reg = EBPF_REG_1,
1745 .off = offsetof(struct dummy_offset, u32),
1748 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1749 .dst_reg = EBPF_REG_3,
1750 .src_reg = EBPF_REG_1,
1751 .off = offsetof(struct dummy_offset, u64),
/* spill both values onto the stack (R10 = frame pointer) */
1754 .code = (BPF_STX | BPF_MEM | BPF_W),
1755 .dst_reg = EBPF_REG_10,
1756 .src_reg = EBPF_REG_2,
1760 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1761 .dst_reg = EBPF_REG_10,
1762 .src_reg = EBPF_REG_3,
/* build R2/R3 as pointers into the spill area (fp minus offset) */
1766 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1767 .dst_reg = EBPF_REG_2,
1768 .src_reg = EBPF_REG_10,
1771 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1772 .dst_reg = EBPF_REG_2,
1776 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1777 .dst_reg = EBPF_REG_3,
1778 .src_reg = EBPF_REG_10,
1781 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1782 .dst_reg = EBPF_REG_3,
/* call the external helper (dummy_func1 via the xsym table) */
1786 .code = (BPF_JMP | EBPF_CALL),
/* reload the (possibly updated) values from the stack */
1790 .code = (BPF_LDX | BPF_MEM | BPF_W),
1791 .dst_reg = EBPF_REG_2,
1792 .src_reg = EBPF_REG_10,
1796 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1797 .dst_reg = EBPF_REG_0,
1798 .src_reg = EBPF_REG_10,
/* return u64 + u32 */
1802 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1803 .dst_reg = EBPF_REG_0,
1804 .src_reg = EBPF_REG_2,
1807 .code = (BPF_JMP | EBPF_EXIT),
1812 dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
1814 const struct dummy_offset *dv;
/*
 * Check for call test #1: run dummy_func1() on the host with the same
 * argument and compare the resulting v64 against the eBPF return value.
 */
1823 test_call1_check(uint64_t rc, const void *arg)
1827 const struct dummy_offset *dv;
1833 dummy_func1(arg, &v32, &v64);
1836 return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
/*
 * External symbol table for call test #1: exposes dummy_func1 to the BPF
 * verifier with its three pointer arguments (struct, u32 *, u64 *).
 */
1839 static const struct rte_bpf_xsym test_call1_xsym[] = {
1841 .name = RTE_STR(dummy_func1),
1842 .type = RTE_BPF_XTYPE_FUNC,
1844 .val = (void *)dummy_func1,
/* argument descriptors, in call order */
1848 .type = RTE_BPF_ARG_PTR,
1849 .size = sizeof(struct dummy_offset),
1852 .type = RTE_BPF_ARG_PTR,
1853 .size = sizeof(uint32_t),
1856 .type = RTE_BPF_ARG_PTR,
1857 .size = sizeof(uint64_t),
/*
 * eBPF program for call test #2:
 * carve two dummy_offset-sized areas out of the stack, pass pointers to
 * both to an external function, then read back u64+u32 from the first
 * area and u16+u8 from the second and return the sum.
 */
1864 static const struct ebpf_insn test_call2_prog[] = {
/* R1 = fp - sizeof(struct dummy_offset) */
1867 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1868 .dst_reg = EBPF_REG_1,
1869 .src_reg = EBPF_REG_10,
1872 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1873 .dst_reg = EBPF_REG_1,
1874 .imm = -(int32_t)sizeof(struct dummy_offset),
/* R2 = fp - 2 * sizeof(struct dummy_offset) */
1877 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1878 .dst_reg = EBPF_REG_2,
1879 .src_reg = EBPF_REG_10,
1882 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1883 .dst_reg = EBPF_REG_2,
1884 .imm = -2 * (int32_t)sizeof(struct dummy_offset),
/* call the external helper (dummy_func2 via the xsym table) */
1887 .code = (BPF_JMP | EBPF_CALL),
/* R0 = a.u64 + a.u32 (first stack area) */
1891 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1892 .dst_reg = EBPF_REG_1,
1893 .src_reg = EBPF_REG_10,
1894 .off = -(int32_t)(sizeof(struct dummy_offset) -
1895 offsetof(struct dummy_offset, u64)),
1898 .code = (BPF_LDX | BPF_MEM | BPF_W),
1899 .dst_reg = EBPF_REG_0,
1900 .src_reg = EBPF_REG_10,
1901 .off = -(int32_t)(sizeof(struct dummy_offset) -
1902 offsetof(struct dummy_offset, u32)),
1905 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1906 .dst_reg = EBPF_REG_0,
1907 .src_reg = EBPF_REG_1,
/* R0 += b.u16 + b.u8 (second stack area) */
1910 .code = (BPF_LDX | BPF_MEM | BPF_H),
1911 .dst_reg = EBPF_REG_1,
1912 .src_reg = EBPF_REG_10,
1913 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1914 offsetof(struct dummy_offset, u16)),
1917 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1918 .dst_reg = EBPF_REG_0,
1919 .src_reg = EBPF_REG_1,
1922 .code = (BPF_LDX | BPF_MEM | BPF_B),
1923 .dst_reg = EBPF_REG_1,
1924 .src_reg = EBPF_REG_10,
1925 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1926 offsetof(struct dummy_offset, u8)),
1929 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1930 .dst_reg = EBPF_REG_0,
1931 .src_reg = EBPF_REG_1,
1934 .code = (BPF_JMP | EBPF_EXIT),
1940 dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
/*
 * Check for call test #2: run dummy_func2() on the host, compute the same
 * field sum the eBPF program computed, and compare with its return value.
 */
1956 test_call2_check(uint64_t rc, const void *arg)
1959 struct dummy_offset a, b;
1963 dummy_func2(&a, &b);
/* same combination of fields the eBPF program sums up */
1964 v = a.u64 + a.u32 + b.u16 + b.u8;
1966 return cmp_res(__func__, v, rc, arg, arg, 0);
/*
 * External symbol table for call test #2: dummy_func2 takes two pointers
 * to dummy_offset-sized areas.
 */
1969 static const struct rte_bpf_xsym test_call2_xsym[] = {
1971 .name = RTE_STR(dummy_func2),
1972 .type = RTE_BPF_XTYPE_FUNC,
1974 .val = (void *)dummy_func2,
/* argument descriptors, in call order */
1978 .type = RTE_BPF_ARG_PTR,
1979 .size = sizeof(struct dummy_offset),
1982 .type = RTE_BPF_ARG_PTR,
1983 .size = sizeof(struct dummy_offset),
/*
 * eBPF program for call test #3:
 * call an external function that returns a pointer (dummy_func3), then
 * load the u8/u16/u32/u64 fields through that returned pointer and
 * return their sum.
 */
1990 static const struct ebpf_insn test_call3_prog[] = {
1993 .code = (BPF_JMP | EBPF_CALL),
/* R2/R3/R4 = fields loaded through the returned pointer (R0) */
1997 .code = (BPF_LDX | BPF_MEM | BPF_B),
1998 .dst_reg = EBPF_REG_2,
1999 .src_reg = EBPF_REG_0,
2000 .off = offsetof(struct dummy_offset, u8),
2003 .code = (BPF_LDX | BPF_MEM | BPF_H),
2004 .dst_reg = EBPF_REG_3,
2005 .src_reg = EBPF_REG_0,
2006 .off = offsetof(struct dummy_offset, u16),
2009 .code = (BPF_LDX | BPF_MEM | BPF_W),
2010 .dst_reg = EBPF_REG_4,
2011 .src_reg = EBPF_REG_0,
2012 .off = offsetof(struct dummy_offset, u32),
/* R0 = u64 field, overwriting the pointer */
2015 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
2016 .dst_reg = EBPF_REG_0,
2017 .src_reg = EBPF_REG_0,
2018 .off = offsetof(struct dummy_offset, u64),
/* accumulate the remaining fields into R0 */
2022 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2023 .dst_reg = EBPF_REG_0,
2024 .src_reg = EBPF_REG_4,
2027 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2028 .dst_reg = EBPF_REG_0,
2029 .src_reg = EBPF_REG_3,
2032 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2033 .dst_reg = EBPF_REG_0,
2034 .src_reg = EBPF_REG_2,
2037 .code = (BPF_JMP | EBPF_EXIT),
/* Return a pointer to the last element of p->in[]; target of call test #3. */
2041 static const struct dummy_offset *
2042 dummy_func3(const struct dummy_vect8 *p)
2044 return &p->in[RTE_DIM(p->in) - 1];
/*
 * Prepare for call test #3: zero the whole input vector, then plant a
 * known value in the element dummy_func3() will return a pointer to.
 */
2048 test_call3_prepare(void *arg)
2050 struct dummy_vect8 *pv;
2051 struct dummy_offset *df;
/* cast away const: we intentionally write through dummy_func3's result */
2054 df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv);
2056 memset(pv, 0, sizeof(*pv));
2057 df->u64 = (int32_t)TEST_FILL_1;
/*
 * Check for call test #3: locate the same element via dummy_func3() on
 * the host and compare the expected field sum with the eBPF return value.
 */
2064 test_call3_check(uint64_t rc, const void *arg)
2067 const struct dummy_vect8 *pv;
2068 const struct dummy_offset *dft;
2071 dft = dummy_func3(pv);
2078 return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv));
2081 static const struct rte_bpf_xsym test_call3_xsym[] = {
2083 .name = RTE_STR(dummy_func3),
2084 .type = RTE_BPF_XTYPE_FUNC,
2086 .val = (void *)dummy_func3,
2090 .type = RTE_BPF_ARG_PTR,
2091 .size = sizeof(struct dummy_vect8),
2095 .type = RTE_BPF_ARG_PTR,
2096 .size = sizeof(struct dummy_offset),
2102 /* Test for stack corruption in multiple function calls */
/*
 * Store four known byte values on the stack, memfrob them in place via
 * one external call, reload and gather them via a second external call,
 * then undo the frob with an XOR so the result reflects whether the
 * stack survived both calls intact.
 */
2103 static const struct ebpf_insn test_call4_prog[] = {
/* write four known bytes onto the stack */
2105 .code = (BPF_ST | BPF_MEM | BPF_B),
2106 .dst_reg = EBPF_REG_10,
2111 .code = (BPF_ST | BPF_MEM | BPF_B),
2112 .dst_reg = EBPF_REG_10,
2117 .code = (BPF_ST | BPF_MEM | BPF_B),
2118 .dst_reg = EBPF_REG_10,
2123 .code = (BPF_ST | BPF_MEM | BPF_B),
2124 .dst_reg = EBPF_REG_10,
/* R1 = pointer to the 4-byte stack area */
2129 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2130 .dst_reg = EBPF_REG_1,
2131 .src_reg = EBPF_REG_10,
2134 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2135 .dst_reg = EBPF_REG_2,
2139 .code = (EBPF_ALU64 | BPF_SUB | BPF_X),
2140 .dst_reg = EBPF_REG_1,
2141 .src_reg = EBPF_REG_2,
/* first external call: memfrob the stack bytes in place */
2144 .code = (BPF_JMP | EBPF_CALL),
/* reload the four (frobbed) bytes into R1..R4 */
2148 .code = (BPF_LDX | BPF_MEM | BPF_B),
2149 .dst_reg = EBPF_REG_1,
2150 .src_reg = EBPF_REG_10,
2154 .code = (BPF_LDX | BPF_MEM | BPF_B),
2155 .dst_reg = EBPF_REG_2,
2156 .src_reg = EBPF_REG_10,
2160 .code = (BPF_LDX | BPF_MEM | BPF_B),
2161 .dst_reg = EBPF_REG_3,
2162 .src_reg = EBPF_REG_10,
2166 .code = (BPF_LDX | BPF_MEM | BPF_B),
2167 .dst_reg = EBPF_REG_4,
2168 .src_reg = EBPF_REG_10,
/* second external call: gather the bytes into one 32-bit word */
2172 .code = (BPF_JMP | EBPF_CALL),
/* undo the frob so a clean stack yields the original pattern */
2176 .code = (EBPF_ALU64 | BPF_XOR | BPF_K),
2177 .dst_reg = EBPF_REG_0,
2178 .imm = TEST_MEMFROB,
2181 .code = (BPF_JMP | EBPF_EXIT),
2185 /* Gathering the bytes together */
/* Pack four bytes into one 32-bit word, a = most significant byte. */
2187 dummy_func4_1(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
2189 return (a << 24) | (b << 16) | (c << 8) | (d << 0);
2192 /* Implementation of memfrob */
/* XOR-obfuscate n bytes of s in place (glibc memfrob() semantics). */
2194 dummy_func4_0(uint32_t *s, uint8_t n)
2196 char *p = (char *) s;
/*
 * Check for call test #4: reproduce the frob/gather/unfrob sequence on
 * the host and compare the result with the eBPF return value.
 */
2204 test_call4_check(uint64_t rc, const void *arg)
/* same byte pattern the eBPF program stores on its stack */
2206 uint8_t a[4] = {1, 2, 3, 4};
2211 s = dummy_func4_0((uint32_t *)a, 4);
2213 s = dummy_func4_1(a[0], a[1], a[2], a[3]);
/* undo the frob, matching the program's final XOR */
2215 v = s ^ TEST_MEMFROB;
2217 return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
/*
 * External symbol table for call test #4: dummy_func4_0 (memfrob over a
 * 4-byte buffer) and dummy_func4_1 (pack four raw bytes into a u32).
 */
2220 static const struct rte_bpf_xsym test_call4_xsym[] = {
2222 .name = RTE_STR(dummy_func4_0),
2223 .type = RTE_BPF_XTYPE_FUNC,
2225 .val = (void *)dummy_func4_0,
/* arguments: buffer pointer + byte count */
2229 .type = RTE_BPF_ARG_PTR,
2230 .size = 4 * sizeof(uint8_t),
2233 .type = RTE_BPF_ARG_RAW,
2234 .size = sizeof(uint8_t),
/* return value */
2238 .type = RTE_BPF_ARG_RAW,
2239 .size = sizeof(uint32_t),
2244 .name = RTE_STR(dummy_func4_1),
2245 .type = RTE_BPF_XTYPE_FUNC,
2247 .val = (void *)dummy_func4_1,
/* arguments: four raw bytes */
2251 .type = RTE_BPF_ARG_RAW,
2252 .size = sizeof(uint8_t),
2255 .type = RTE_BPF_ARG_RAW,
2256 .size = sizeof(uint8_t),
2259 .type = RTE_BPF_ARG_RAW,
2260 .size = sizeof(uint8_t),
2263 .type = RTE_BPF_ARG_RAW,
2264 .size = sizeof(uint8_t),
/* return value */
2268 .type = RTE_BPF_ARG_RAW,
2269 .size = sizeof(uint32_t),
2275 /* string compare test case */
/*
 * Build two short strings on the stack, call an external strcmp-style
 * helper (dummy_func5) twice — once with identical strings, once with
 * differing ones — and combine the two (truncated) results into the
 * final return value.
 */
2276 static const struct ebpf_insn test_call5_prog[] = {
/* materialize the first string on the stack */
2279 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2280 .dst_reg = EBPF_REG_1,
2284 .code = (BPF_STX | BPF_MEM | BPF_W),
2285 .dst_reg = EBPF_REG_10,
2286 .src_reg = EBPF_REG_1,
/* R6 = 0, also used for NUL terminators */
2290 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2291 .dst_reg = EBPF_REG_6,
2295 .code = (BPF_STX | BPF_MEM | BPF_B),
2296 .dst_reg = EBPF_REG_10,
2297 .src_reg = EBPF_REG_6,
2301 .code = (BPF_STX | BPF_MEM | BPF_W),
2302 .dst_reg = EBPF_REG_10,
2303 .src_reg = EBPF_REG_6,
/* materialize the second string on the stack */
2307 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2308 .dst_reg = EBPF_REG_1,
2312 .code = (BPF_STX | BPF_MEM | BPF_W),
2313 .dst_reg = EBPF_REG_10,
2314 .src_reg = EBPF_REG_1,
/* first call: compare the string with itself (expect 0) */
2318 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2319 .dst_reg = EBPF_REG_1,
2320 .src_reg = EBPF_REG_10,
2323 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2324 .dst_reg = EBPF_REG_1,
2328 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2329 .dst_reg = EBPF_REG_2,
2330 .src_reg = EBPF_REG_1,
2333 .code = (BPF_JMP | EBPF_CALL),
2337 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2338 .dst_reg = EBPF_REG_1,
2339 .src_reg = EBPF_REG_0,
2342 .code = (BPF_ALU | EBPF_MOV | BPF_K),
2343 .dst_reg = EBPF_REG_0,
/* truncate the comparison result via shift left/right */
2347 .code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2348 .dst_reg = EBPF_REG_1,
2352 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2353 .dst_reg = EBPF_REG_1,
/* non-zero result for equal strings -> bail out */
2357 .code = (BPF_JMP | EBPF_JNE | BPF_K),
2358 .dst_reg = EBPF_REG_1,
/* second call: compare the two different strings */
2363 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2364 .dst_reg = EBPF_REG_1,
2365 .src_reg = EBPF_REG_10,
2368 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2369 .dst_reg = EBPF_REG_1,
2373 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2374 .dst_reg = EBPF_REG_2,
2375 .src_reg = EBPF_REG_10,
2378 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2379 .dst_reg = EBPF_REG_2,
2383 .code = (BPF_JMP | EBPF_CALL),
2387 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2388 .dst_reg = EBPF_REG_1,
2389 .src_reg = EBPF_REG_0,
/* truncate again */
2392 .code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2393 .dst_reg = EBPF_REG_1,
2397 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2398 .dst_reg = EBPF_REG_1,
2402 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2403 .dst_reg = EBPF_REG_0,
2404 .src_reg = EBPF_REG_1,
/* equal result for unequal strings -> zero R0 before exit */
2407 .code = (BPF_JMP | BPF_JEQ | BPF_X),
2408 .dst_reg = EBPF_REG_1,
2409 .src_reg = EBPF_REG_6,
2413 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2414 .dst_reg = EBPF_REG_0,
2418 .code = (BPF_JMP | EBPF_EXIT),
2422 /* String comparison implementation, return 0 if equal else difference */
2424 dummy_func5(const char *s1, const char *s2)
/* walk both strings while they match and s1 is not exhausted */
2426 while (*s1 && (*s1 == *s2)) {
/* compare as unsigned char, per the strcmp() convention */
2430 return *(const unsigned char *)s1 - *(const unsigned char *)s2;
/*
 * Check for call test #5: run the same pair of string comparisons on the
 * host and compare the combined result with the eBPF return value.
 */
2434 test_call5_check(uint64_t rc, const void *arg)
/* equal strings -> expect 0 */
2442 v = dummy_func5(a, a);
/* different strings -> expect non-zero */
2448 v = dummy_func5(a, b);
2455 return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
/*
 * External symbol table for call test #5: dummy_func5 takes two string
 * pointers and returns a raw 32-bit comparison result.
 */
2458 static const struct rte_bpf_xsym test_call5_xsym[] = {
2460 .name = RTE_STR(dummy_func5),
2461 .type = RTE_BPF_XTYPE_FUNC,
2463 .val = (void *)dummy_func5,
/* argument descriptors */
2467 .type = RTE_BPF_ARG_PTR,
2468 .size = sizeof(char),
2471 .type = RTE_BPF_ARG_PTR,
2472 .size = sizeof(char),
/* return value */
2476 .type = RTE_BPF_ARG_RAW,
2477 .size = sizeof(uint32_t),
2483 /* load mbuf (BPF_ABS/BPF_IND) test-cases */
/*
 * Parse an IPv4 packet held in an mbuf using the classic-BPF style
 * absolute/indirect loads and accumulate a checksum-like value from
 * several packet fields; returns 0 for a non-IPv4 packet.
 * The host reference for this program is test_ld_mbuf1() below.
 */
2484 static const struct ebpf_insn test_ld_mbuf1_prog[] = {
2486 /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
2488 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2489 .dst_reg = EBPF_REG_6,
2490 .src_reg = EBPF_REG_1,
2492 /* load IPv4 version and IHL */
2494 .code = (BPF_LD | BPF_ABS | BPF_B),
2495 .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
2497 /* check IP version */
2499 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2500 .dst_reg = EBPF_REG_2,
2501 .src_reg = EBPF_REG_0,
2504 .code = (BPF_ALU | BPF_AND | BPF_K),
2505 .dst_reg = EBPF_REG_2,
2509 .code = (BPF_JMP | BPF_JEQ | BPF_K),
2510 .dst_reg = EBPF_REG_2,
2511 .imm = IPVERSION << 4,
2514 /* invalid IP version, return 0 */
2516 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
2517 .dst_reg = EBPF_REG_0,
2518 .src_reg = EBPF_REG_0,
2521 .code = (BPF_JMP | EBPF_EXIT),
2523 /* load 3-rd byte of IP data */
2525 .code = (BPF_ALU | BPF_AND | BPF_K),
2526 .dst_reg = EBPF_REG_0,
2527 .imm = RTE_IPV4_HDR_IHL_MASK,
2530 .code = (BPF_ALU | BPF_LSH | BPF_K),
2531 .dst_reg = EBPF_REG_0,
2535 .code = (BPF_LD | BPF_IND | BPF_B),
2536 .src_reg = EBPF_REG_0,
/* R7 = running accumulator */
2540 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2541 .dst_reg = EBPF_REG_7,
2542 .src_reg = EBPF_REG_0,
2544 /* load IPv4 src addr */
2546 .code = (BPF_LD | BPF_ABS | BPF_W),
2547 .imm = offsetof(struct rte_ipv4_hdr, src_addr),
2550 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2551 .dst_reg = EBPF_REG_7,
2552 .src_reg = EBPF_REG_0,
2554 /* load IPv4 total length */
2556 .code = (BPF_LD | BPF_ABS | BPF_H),
2557 .imm = offsetof(struct rte_ipv4_hdr, total_length),
2560 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2561 .dst_reg = EBPF_REG_8,
2562 .src_reg = EBPF_REG_0,
2564 /* load last 4 bytes of IP data */
2566 .code = (BPF_LD | BPF_IND | BPF_W),
2567 .src_reg = EBPF_REG_8,
2568 .imm = -(int32_t)sizeof(uint32_t),
2571 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2572 .dst_reg = EBPF_REG_7,
2573 .src_reg = EBPF_REG_0,
2575 /* load 2 bytes from the middle of IP data */
2577 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2578 .dst_reg = EBPF_REG_8,
2582 .code = (BPF_LD | BPF_IND | BPF_H),
2583 .src_reg = EBPF_REG_8,
/* R0 = final accumulated value */
2586 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2587 .dst_reg = EBPF_REG_0,
2588 .src_reg = EBPF_REG_7,
2591 .code = (BPF_JMP | EBPF_EXIT),
/*
 * Attach an external buffer to a stack-allocated mbuf, reset it and
 * append data_len bytes of payload.  The pool pointer is faked — these
 * mbufs never go back to a mempool.
 */
2596 dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len,
2603 mb->buf_iova = (uintptr_t)buf;
2604 mb->buf_len = buf_len;
2605 rte_mbuf_refcnt_set(mb, 1);
2607 /* set pool pointer to dummy value, test doesn't use it */
2608 mb->pool = (void *)buf;
2610 rte_pktmbuf_reset(mb);
2611 db = (uint8_t *)rte_pktmbuf_append(mb, data_len);
/* fill the appended area with deterministic data */
2613 for (i = 0; i != data_len; i++)
/*
 * Prepare for the ld_mbuf tests: build a two-segment mbuf chain over the
 * dummy_mbuf buffers and write a valid IPv4 header at the start of the
 * packet data.
 */
2618 test_ld_mbuf1_prepare(void *arg)
2620 struct dummy_mbuf *dm;
2621 struct rte_ipv4_hdr *ph;
2623 const uint32_t plen = 400;
2624 const struct rte_ipv4_hdr iph = {
2625 .version_ihl = RTE_IPV4_VHL_DEF,
2626 .total_length = rte_cpu_to_be_16(plen),
2627 .time_to_live = IPDEFTTL,
2628 .next_proto_id = IPPROTO_RAW,
2629 .src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
2630 .dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
2634 memset(dm, 0, sizeof(*dm));
/* two segments, chained below, so BPF_ABS/IND loads cross a boundary */
2636 dummy_mbuf_prep(&dm->mb[0], dm->buf[0], sizeof(dm->buf[0]),
2638 dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[0]),
2641 rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]);
2643 ph = rte_pktmbuf_mtod(dm->mb, typeof(ph));
2644 memcpy(ph, &iph, sizeof(iph));
/*
 * Host reference implementation of test_ld_mbuf1_prog: perform the same
 * sequence of packet reads with rte_pktmbuf_read() and return the same
 * accumulated value (0 for a non-IPv4 packet).
 */
2648 test_ld_mbuf1(const struct rte_mbuf *pkt)
2652 const uint16_t *p16;
2653 const uint32_t *p32;
2654 struct dummy_offset dof;
2656 /* load IPv4 version and IHL */
2657 p8 = rte_pktmbuf_read(pkt,
2658 offsetof(struct rte_ipv4_hdr, version_ihl), sizeof(*p8),
2663 /* check IP version */
2664 if ((p8[0] & 0xf0) != IPVERSION << 4)
/* n = IPv4 header length in bytes */
2667 n = (p8[0] & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;
2669 /* load 3-rd byte of IP data */
2670 p8 = rte_pktmbuf_read(pkt, n + 3, sizeof(*p8), &dof);
2676 /* load IPv4 src addr */
2677 p32 = rte_pktmbuf_read(pkt,
2678 offsetof(struct rte_ipv4_hdr, src_addr), sizeof(*p32),
2683 v += rte_be_to_cpu_32(p32[0]);
2685 /* load IPv4 total length */
2686 p16 = rte_pktmbuf_read(pkt,
2687 offsetof(struct rte_ipv4_hdr, total_length), sizeof(*p16),
2692 n = rte_be_to_cpu_16(p16[0]);
2694 /* load last 4 bytes of IP data */
2695 p32 = rte_pktmbuf_read(pkt, n - sizeof(*p32), sizeof(*p32), &dof);
2699 v += rte_be_to_cpu_32(p32[0]);
2701 /* load 2 bytes from the middle of IP data */
2702 p16 = rte_pktmbuf_read(pkt, n / 2, sizeof(*p16), &dof);
2706 v += rte_be_to_cpu_16(p16[0]);
/*
 * Check for the ld_mbuf1 test: compare the eBPF result against the host
 * reference computation over the same mbuf chain.
 */
2711 test_ld_mbuf1_check(uint64_t rc, const void *arg)
2713 const struct dummy_mbuf *dm;
2717 v = test_ld_mbuf1(dm->mb);
2718 return cmp_res(__func__, v, rc, arg, arg, 0);
2722 * same as ld_mbuf1, but then truncate the mbuf by 1B,
2723 * so the load of the last 4B fails.
2726 test_ld_mbuf2_prepare(void *arg)
2728 struct dummy_mbuf *dm;
2730 test_ld_mbuf1_prepare(arg);
/* drop the final byte so the BPF_LD of the last word goes out of range */
2732 rte_pktmbuf_trim(dm->mb, 1);
/* Out-of-range mbuf load must make the program return 0. */
2736 test_ld_mbuf2_check(uint64_t rc, const void *arg)
2738 return cmp_res(__func__, 0, rc, arg, arg, 0);
2741 /* same as test_ld_mbuf1, but now store intermediate results on the stack */
/*
 * Variant of test_ld_mbuf1_prog that spills each intermediate packet
 * value into a dummy_offset-shaped area at the top of the stack frame,
 * then reloads them all at the end and sums them — exercising
 * BPF_ABS/BPF_IND loads interleaved with stack stores/loads.
 */
2742 static const struct ebpf_insn test_ld_mbuf3_prog[] = {
2744 /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
2746 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2747 .dst_reg = EBPF_REG_6,
2748 .src_reg = EBPF_REG_1,
2750 /* load IPv4 version and IHL */
2752 .code = (BPF_LD | BPF_ABS | BPF_B),
2753 .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
2755 /* check IP version */
2757 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2758 .dst_reg = EBPF_REG_2,
2759 .src_reg = EBPF_REG_0,
2762 .code = (BPF_ALU | BPF_AND | BPF_K),
2763 .dst_reg = EBPF_REG_2,
2767 .code = (BPF_JMP | BPF_JEQ | BPF_K),
2768 .dst_reg = EBPF_REG_2,
2769 .imm = IPVERSION << 4,
2772 /* invalid IP version, return 0 */
2774 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
2775 .dst_reg = EBPF_REG_0,
2776 .src_reg = EBPF_REG_0,
2779 .code = (BPF_JMP | EBPF_EXIT),
2781 /* load 3-rd byte of IP data */
2783 .code = (BPF_ALU | BPF_AND | BPF_K),
2784 .dst_reg = EBPF_REG_0,
2785 .imm = RTE_IPV4_HDR_IHL_MASK,
2788 .code = (BPF_ALU | BPF_LSH | BPF_K),
2789 .dst_reg = EBPF_REG_0,
2793 .code = (BPF_LD | BPF_IND | BPF_B),
2794 .src_reg = EBPF_REG_0,
/* spill the byte to the stack (u8 slot of the frame-top dummy_offset) */
2798 .code = (BPF_STX | BPF_MEM | BPF_B),
2799 .dst_reg = EBPF_REG_10,
2800 .src_reg = EBPF_REG_0,
2801 .off = (int16_t)(offsetof(struct dummy_offset, u8) -
2802 sizeof(struct dummy_offset)),
2804 /* load IPv4 src addr */
2806 .code = (BPF_LD | BPF_ABS | BPF_W),
2807 .imm = offsetof(struct rte_ipv4_hdr, src_addr),
/* spill the src address (u32 slot) */
2810 .code = (BPF_STX | BPF_MEM | BPF_W),
2811 .dst_reg = EBPF_REG_10,
2812 .src_reg = EBPF_REG_0,
2813 .off = (int16_t)(offsetof(struct dummy_offset, u32) -
2814 sizeof(struct dummy_offset)),
2816 /* load IPv4 total length */
2818 .code = (BPF_LD | BPF_ABS | BPF_H),
2819 .imm = offsetof(struct rte_ipv4_hdr, total_length),
2822 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2823 .dst_reg = EBPF_REG_8,
2824 .src_reg = EBPF_REG_0,
2826 /* load last 4 bytes of IP data */
2828 .code = (BPF_LD | BPF_IND | BPF_W),
2829 .src_reg = EBPF_REG_8,
2830 .imm = -(int32_t)sizeof(uint32_t),
/* spill the trailing word (u64 slot) */
2833 .code = (BPF_STX | BPF_MEM | EBPF_DW),
2834 .dst_reg = EBPF_REG_10,
2835 .src_reg = EBPF_REG_0,
2836 .off = (int16_t)(offsetof(struct dummy_offset, u64) -
2837 sizeof(struct dummy_offset)),
2839 /* load 2 bytes from the middle of IP data */
2841 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2842 .dst_reg = EBPF_REG_8,
2846 .code = (BPF_LD | BPF_IND | BPF_H),
2847 .src_reg = EBPF_REG_8,
/* reload all the spilled values and accumulate them into R0 */
2850 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
2851 .dst_reg = EBPF_REG_1,
2852 .src_reg = EBPF_REG_10,
2853 .off = (int16_t)(offsetof(struct dummy_offset, u64) -
2854 sizeof(struct dummy_offset)),
2857 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2858 .dst_reg = EBPF_REG_0,
2859 .src_reg = EBPF_REG_1,
2862 .code = (BPF_LDX | BPF_MEM | BPF_W),
2863 .dst_reg = EBPF_REG_1,
2864 .src_reg = EBPF_REG_10,
2865 .off = (int16_t)(offsetof(struct dummy_offset, u32) -
2866 sizeof(struct dummy_offset)),
2869 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2870 .dst_reg = EBPF_REG_0,
2871 .src_reg = EBPF_REG_1,
2874 .code = (BPF_LDX | BPF_MEM | BPF_B),
2875 .dst_reg = EBPF_REG_1,
2876 .src_reg = EBPF_REG_10,
2877 .off = (int16_t)(offsetof(struct dummy_offset, u8) -
2878 sizeof(struct dummy_offset)),
2881 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2882 .dst_reg = EBPF_REG_0,
2883 .src_reg = EBPF_REG_1,
2886 .code = (BPF_JMP | EBPF_EXIT),
2890 /* all bpf test cases */
2891 static const struct bpf_test tests[] = {
2893 .name = "test_store1",
2894 .arg_sz = sizeof(struct dummy_offset),
2896 .ins = test_store1_prog,
2897 .nb_ins = RTE_DIM(test_store1_prog),
2899 .type = RTE_BPF_ARG_PTR,
2900 .size = sizeof(struct dummy_offset),
2903 .prepare = test_store1_prepare,
2904 .check_result = test_store1_check,
2907 .name = "test_store2",
2908 .arg_sz = sizeof(struct dummy_offset),
2910 .ins = test_store2_prog,
2911 .nb_ins = RTE_DIM(test_store2_prog),
2913 .type = RTE_BPF_ARG_PTR,
2914 .size = sizeof(struct dummy_offset),
2917 .prepare = test_store1_prepare,
2918 .check_result = test_store1_check,
2921 .name = "test_load1",
2922 .arg_sz = sizeof(struct dummy_offset),
2924 .ins = test_load1_prog,
2925 .nb_ins = RTE_DIM(test_load1_prog),
2927 .type = RTE_BPF_ARG_PTR,
2928 .size = sizeof(struct dummy_offset),
2931 .prepare = test_load1_prepare,
2932 .check_result = test_load1_check,
2935 .name = "test_ldimm1",
2936 .arg_sz = sizeof(struct dummy_offset),
2938 .ins = test_ldimm1_prog,
2939 .nb_ins = RTE_DIM(test_ldimm1_prog),
2941 .type = RTE_BPF_ARG_PTR,
2942 .size = sizeof(struct dummy_offset),
2945 .prepare = test_store1_prepare,
2946 .check_result = test_ldimm1_check,
2949 .name = "test_mul1",
2950 .arg_sz = sizeof(struct dummy_vect8),
2952 .ins = test_mul1_prog,
2953 .nb_ins = RTE_DIM(test_mul1_prog),
2955 .type = RTE_BPF_ARG_PTR,
2956 .size = sizeof(struct dummy_vect8),
2959 .prepare = test_mul1_prepare,
2960 .check_result = test_mul1_check,
2963 .name = "test_shift1",
2964 .arg_sz = sizeof(struct dummy_vect8),
2966 .ins = test_shift1_prog,
2967 .nb_ins = RTE_DIM(test_shift1_prog),
2969 .type = RTE_BPF_ARG_PTR,
2970 .size = sizeof(struct dummy_vect8),
2973 .prepare = test_shift1_prepare,
2974 .check_result = test_shift1_check,
2977 .name = "test_jump1",
2978 .arg_sz = sizeof(struct dummy_vect8),
2980 .ins = test_jump1_prog,
2981 .nb_ins = RTE_DIM(test_jump1_prog),
2983 .type = RTE_BPF_ARG_PTR,
2984 .size = sizeof(struct dummy_vect8),
2987 .prepare = test_jump1_prepare,
2988 .check_result = test_jump1_check,
2991 .name = "test_jump2",
2992 .arg_sz = sizeof(struct dummy_net),
2994 .ins = test_jump2_prog,
2995 .nb_ins = RTE_DIM(test_jump2_prog),
2997 .type = RTE_BPF_ARG_PTR,
2998 .size = sizeof(struct dummy_net),
3001 .prepare = test_jump2_prepare,
3002 .check_result = test_jump2_check,
3005 .name = "test_alu1",
3006 .arg_sz = sizeof(struct dummy_vect8),
3008 .ins = test_alu1_prog,
3009 .nb_ins = RTE_DIM(test_alu1_prog),
3011 .type = RTE_BPF_ARG_PTR,
3012 .size = sizeof(struct dummy_vect8),
3015 .prepare = test_jump1_prepare,
3016 .check_result = test_alu1_check,
3019 .name = "test_bele1",
3020 .arg_sz = sizeof(struct dummy_vect8),
3022 .ins = test_bele1_prog,
3023 .nb_ins = RTE_DIM(test_bele1_prog),
3025 .type = RTE_BPF_ARG_PTR,
3026 .size = sizeof(struct dummy_vect8),
3029 .prepare = test_bele1_prepare,
3030 .check_result = test_bele1_check,
3033 .name = "test_xadd1",
3034 .arg_sz = sizeof(struct dummy_offset),
3036 .ins = test_xadd1_prog,
3037 .nb_ins = RTE_DIM(test_xadd1_prog),
3039 .type = RTE_BPF_ARG_PTR,
3040 .size = sizeof(struct dummy_offset),
3043 .prepare = test_store1_prepare,
3044 .check_result = test_xadd1_check,
3047 .name = "test_div1",
3048 .arg_sz = sizeof(struct dummy_vect8),
3050 .ins = test_div1_prog,
3051 .nb_ins = RTE_DIM(test_div1_prog),
3053 .type = RTE_BPF_ARG_PTR,
3054 .size = sizeof(struct dummy_vect8),
3057 .prepare = test_mul1_prepare,
3058 .check_result = test_div1_check,
3061 .name = "test_call1",
3062 .arg_sz = sizeof(struct dummy_offset),
3064 .ins = test_call1_prog,
3065 .nb_ins = RTE_DIM(test_call1_prog),
3067 .type = RTE_BPF_ARG_PTR,
3068 .size = sizeof(struct dummy_offset),
3070 .xsym = test_call1_xsym,
3071 .nb_xsym = RTE_DIM(test_call1_xsym),
3073 .prepare = test_load1_prepare,
3074 .check_result = test_call1_check,
3075 /* for now don't support function calls on 32 bit platform */
3076 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3079 .name = "test_call2",
3080 .arg_sz = sizeof(struct dummy_offset),
3082 .ins = test_call2_prog,
3083 .nb_ins = RTE_DIM(test_call2_prog),
3085 .type = RTE_BPF_ARG_PTR,
3086 .size = sizeof(struct dummy_offset),
3088 .xsym = test_call2_xsym,
3089 .nb_xsym = RTE_DIM(test_call2_xsym),
3091 .prepare = test_store1_prepare,
3092 .check_result = test_call2_check,
3093 /* for now don't support function calls on 32 bit platform */
3094 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3097 .name = "test_call3",
3098 .arg_sz = sizeof(struct dummy_vect8),
3100 .ins = test_call3_prog,
3101 .nb_ins = RTE_DIM(test_call3_prog),
3103 .type = RTE_BPF_ARG_PTR,
3104 .size = sizeof(struct dummy_vect8),
3106 .xsym = test_call3_xsym,
3107 .nb_xsym = RTE_DIM(test_call3_xsym),
3109 .prepare = test_call3_prepare,
3110 .check_result = test_call3_check,
3111 /* for now don't support function calls on 32 bit platform */
3112 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3115 .name = "test_call4",
3116 .arg_sz = sizeof(struct dummy_offset),
3118 .ins = test_call4_prog,
3119 .nb_ins = RTE_DIM(test_call4_prog),
3121 .type = RTE_BPF_ARG_PTR,
3122 .size = 2 * sizeof(struct dummy_offset),
3124 .xsym = test_call4_xsym,
3125 .nb_xsym = RTE_DIM(test_call4_xsym),
3127 .prepare = test_store1_prepare,
3128 .check_result = test_call4_check,
3129 /* for now don't support function calls on 32 bit platform */
3130 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3133 .name = "test_call5",
3134 .arg_sz = sizeof(struct dummy_offset),
3136 .ins = test_call5_prog,
3137 .nb_ins = RTE_DIM(test_call5_prog),
3139 .type = RTE_BPF_ARG_PTR,
3140 .size = sizeof(struct dummy_offset),
3142 .xsym = test_call5_xsym,
3143 .nb_xsym = RTE_DIM(test_call5_xsym),
3145 .prepare = test_store1_prepare,
3146 .check_result = test_call5_check,
3147 /* for now don't support function calls on 32 bit platform */
3148 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3151 .name = "test_ld_mbuf1",
3152 .arg_sz = sizeof(struct dummy_mbuf),
3154 .ins = test_ld_mbuf1_prog,
3155 .nb_ins = RTE_DIM(test_ld_mbuf1_prog),
3157 .type = RTE_BPF_ARG_PTR_MBUF,
3158 .buf_size = sizeof(struct dummy_mbuf),
3161 .prepare = test_ld_mbuf1_prepare,
3162 .check_result = test_ld_mbuf1_check,
3163 /* mbuf as input argument is not supported on 32 bit platform */
3164 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3167 .name = "test_ld_mbuf2",
3168 .arg_sz = sizeof(struct dummy_mbuf),
3170 .ins = test_ld_mbuf1_prog,
3171 .nb_ins = RTE_DIM(test_ld_mbuf1_prog),
3173 .type = RTE_BPF_ARG_PTR_MBUF,
3174 .buf_size = sizeof(struct dummy_mbuf),
3177 .prepare = test_ld_mbuf2_prepare,
3178 .check_result = test_ld_mbuf2_check,
3179 /* mbuf as input argument is not supported on 32 bit platform */
3180 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3183 .name = "test_ld_mbuf3",
3184 .arg_sz = sizeof(struct dummy_mbuf),
3186 .ins = test_ld_mbuf3_prog,
3187 .nb_ins = RTE_DIM(test_ld_mbuf3_prog),
3189 .type = RTE_BPF_ARG_PTR_MBUF,
3190 .buf_size = sizeof(struct dummy_mbuf),
3193 .prepare = test_ld_mbuf1_prepare,
3194 .check_result = test_ld_mbuf1_check,
3195 /* mbuf as input argument is not supported on 32 bit platform */
3196 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
/*
 * Execute a single test case: load the eBPF program, run it with the
 * interpreter against a freshly prepared argument buffer and validate
 * the result; then, if a JIT image is available, repeat the run through
 * the JIT-compiled function and validate again.
 * Returns non-zero on any failure.
 */
3201 run_test(const struct bpf_test *tst)
3205 struct rte_bpf *bpf;
3206 struct rte_bpf_jit jit;
/* VLA sized per test; holds the test's input/output argument */
3207 uint8_t tbuf[tst->arg_sz];
3209 printf("%s(%s) start\n", __func__, tst->name);
3211 bpf = rte_bpf_load(&tst->prm);
3213 printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
3214 __func__, __LINE__, rte_errno, strerror(rte_errno));
/* interpreter run */
3219 rc = rte_bpf_exec(bpf, tbuf);
3220 ret = tst->check_result(rc, tbuf);
3222 printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
3223 __func__, __LINE__, tst->name, ret, strerror(ret));
3226 /* repeat the same test with jit, when possible */
3227 rte_bpf_get_jit(bpf, &jit);
3228 if (jit.func != NULL) {
3231 rc = jit.func(tbuf);
3232 rv = tst->check_result(rc, tbuf);
3235 printf("%s@%d: check_result(%s) failed, "
3237 __func__, __LINE__, tst->name,
3242 rte_bpf_destroy(bpf);
3254 for (i = 0; i != RTE_DIM(tests); i++) {
3255 rv = run_test(tests + i);
3256 if (tests[i].allow_fail == 0)
3263 #endif /* !RTE_LIB_BPF */
3265 REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);
3267 #ifdef RTE_HAS_LIBPCAP
3268 #include <pcap/pcap.h>
/* Dump a compiled cBPF program and its converted eBPF form to stdout. */
3271 test_bpf_dump(struct bpf_program *cbf, const struct rte_bpf_prm *prm)
3273 printf("cBPF program (%u insns)\n", cbf->bf_len);
3276 printf("\neBPF program (%u insns)\n", prm->nb_ins);
3277 rte_bpf_dump(stdout, prm->ins, prm->nb_ins);
/*
 * Compile the pcap filter string, convert it to eBPF, load and execute
 * it against the given mbuf.  The program's return value indicates a
 * filter match; errors in any stage are reported and propagated.
 */
3281 test_bpf_match(pcap_t *pcap, const char *str,
3282 struct rte_mbuf *mb)
3284 struct bpf_program fcode;
3285 struct rte_bpf_prm *prm = NULL;
3286 struct rte_bpf *bpf = NULL;
3290 if (pcap_compile(pcap, &fcode, str, 1, PCAP_NETMASK_UNKNOWN)) {
3291 printf("%s@%d: pcap_compile(\"%s\") failed: %s;\n",
3292 __func__, __LINE__, str, pcap_geterr(pcap));
/* translate the classic BPF program into DPDK eBPF */
3296 prm = rte_bpf_convert(&fcode);
3298 printf("%s@%d: bpf_convert('%s') failed,, error=%d(%s);\n",
3299 __func__, __LINE__, str, rte_errno, strerror(rte_errno));
3303 bpf = rte_bpf_load(prm);
3305 printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
3306 __func__, __LINE__, rte_errno, strerror(rte_errno));
3310 rc = rte_bpf_exec(bpf, mb);
3311 /* The return code from bpf capture filter is non-zero if matched */
3315 rte_bpf_destroy(bpf);
3317 pcap_freecode(&fcode);
3321 /* Basic sanity test can we match a IP packet */
/*
 * Sanity check for cBPF conversion: build a broadcast IPv4 packet in a
 * stack mbuf and verify that the "ip" filter matches it while "not ip"
 * does not.
 */
3323 test_bpf_filter_sanity(pcap_t *pcap)
3325 const uint32_t plen = 100;
3326 struct rte_mbuf mb, *m;
3327 uint8_t tbuf[RTE_MBUF_DEFAULT_BUF_SIZE];
3329 struct rte_ether_hdr eth_hdr;
3330 struct rte_ipv4_hdr ip_hdr;
3333 dummy_mbuf_prep(&mb, tbuf, sizeof(tbuf), plen);
/* fill in Ethernet + IPv4 headers at the start of packet data */
3336 hdr = rte_pktmbuf_mtod(m, typeof(hdr));
3337 hdr->eth_hdr = (struct rte_ether_hdr) {
3338 .dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
3339 .ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4),
3341 hdr->ip_hdr = (struct rte_ipv4_hdr) {
3342 .version_ihl = RTE_IPV4_VHL_DEF,
3343 .total_length = rte_cpu_to_be_16(plen),
3344 .time_to_live = IPDEFTTL,
3345 .next_proto_id = IPPROTO_RAW,
3346 .src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
3347 .dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
/* positive match expected */
3350 if (test_bpf_match(pcap, "ip", m) != 0) {
3351 printf("%s@%d: filter \"ip\" doesn't match test data\n",
3352 __func__, __LINE__);
/* negative match expected */
3355 if (test_bpf_match(pcap, "not ip", m) == 0) {
3356 printf("%s@%d: filter \"not ip\" does match test data\n",
3357 __func__, __LINE__);
3365 * Some sample pcap filter strings from
3366 * https://wiki.wireshark.org/CaptureFilters
3368 static const char * const sample_filters[] = {
3370 "net 192.168.0.0/24",
3371 "src net 192.168.0.0/24",
3372 "src net 192.168.0.0 mask 255.255.255.0",
3373 "dst net 192.168.0.0/24",
3374 "dst net 192.168.0.0 mask 255.255.255.0",
3376 "host 192.0.2.1 and not (port 80 or port 25)",
3377 "host 2001:4b98:db0::8 and not port 80 and not port 25",
3378 "port not 53 and not arp",
3379 "(tcp[0:2] > 1500 and tcp[0:2] < 1550) or (tcp[2:2] > 1500 and tcp[2:2] < 1550)",
3380 "ether proto 0x888e",
3381 "ether[0] & 1 = 0 and ip[16] >= 224",
3382 "icmp[icmptype] != icmp-echo and icmp[icmptype] != icmp-echoreply",
3383 "tcp[tcpflags] & (tcp-syn|tcp-fin) != 0 and not src and dst net 127.0.0.1",
3384 "not ether dst 01:80:c2:00:00:0e",
3385 "not broadcast and not multicast",
3387 "port 80 and tcp[((tcp[12:1] & 0xf0) >> 2):4] = 0x47455420",
3389 "dst port 135 and tcp port 135 and ip[2:2]==48",
3390 "icmp[icmptype]==icmp-echo and ip[2:2]==92 and icmp[8:4]==0xAAAAAAAA",
3391 "dst port 135 or dst port 445 or dst port 1433"
3392 " and tcp[tcpflags] & (tcp-syn) != 0"
3393 " and tcp[tcpflags] & (tcp-ack) = 0 and src net 192.168.0.0/24",
3394 "tcp src port 443 and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4] = 0x18)"
3395 " and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 1] = 0x03)"
3396 " and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 2] < 0x04)"
3397 " and ((ip[2:2] - 4 * (ip[0] & 0x0F) - 4 * ((tcp[12] & 0xF0) >> 4) > 69))",
/*
 * Compile / convert / load a single sample filter expression, dumping
 * the resulting programs on success.  Only the tool-chain is exercised
 * here — the filter is not executed against a packet.
 * Returns 0 when the eBPF program loaded successfully.
 */
3403 test_bpf_filter(pcap_t *pcap, const char *s)
3405 struct bpf_program fcode;
3406 struct rte_bpf_prm *prm = NULL;
3407 struct rte_bpf *bpf = NULL;
3409 if (pcap_compile(pcap, &fcode, s, 1, PCAP_NETMASK_UNKNOWN)) {
3410 printf("%s@%d: pcap_compile('%s') failed: %s;\n",
3411 __func__, __LINE__, s, pcap_geterr(pcap));
3415 prm = rte_bpf_convert(&fcode);
3417 printf("%s@%d: bpf_convert('%s') failed,, error=%d(%s);\n",
3418 __func__, __LINE__, s, rte_errno, strerror(rte_errno));
3422 bpf = rte_bpf_load(prm);
3424 printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
3425 __func__, __LINE__, rte_errno, strerror(rte_errno));
3431 rte_bpf_destroy(bpf);
3433 printf("%s \"%s\"\n", __func__, s);
3434 test_bpf_dump(&fcode, prm);
3438 pcap_freecode(&fcode);
/* success is judged by whether the load stage produced a program */
3439 return (bpf == NULL) ? -1 : 0;
/*
 * Entry point for the bpf_convert_autotest: open a dead pcap handle
 * (compile-only, no capture device needed), run the match sanity test
 * and then try to convert every sample filter expression.
 */
3443 test_bpf_convert(void)
3449 pcap = pcap_open_dead(DLT_EN10MB, 262144);
3451 printf("pcap_open_dead failed\n");
3455 rc = test_bpf_filter_sanity(pcap);
/* accumulate failures across all sample filters */
3456 for (i = 0; i < RTE_DIM(sample_filters); i++)
3457 rc |= test_bpf_filter(pcap, sample_filters[i]);
3463 REGISTER_TEST_COMMAND(bpf_convert_autotest, test_bpf_convert);
3464 #endif /* RTE_HAS_LIBPCAP */