1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
10 #include <rte_memory.h>
11 #include <rte_debug.h>
12 #include <rte_hexdump.h>
13 #include <rte_malloc.h>
14 #include <rte_random.h>
15 #include <rte_byteorder.h>
16 #include <rte_errno.h>
18 #include <rte_ether.h>
24 * Basic functional tests for librte_bpf.
25 * The main procedure - load eBPF program, execute it and
26 * compare results with expected values.
37 struct dummy_offset in[8];
38 struct dummy_offset out[8];
42 struct rte_ether_hdr eth_hdr;
43 struct rte_vlan_hdr vlan_hdr;
44 struct rte_ipv4_hdr ip_hdr;
47 #define DUMMY_MBUF_NUM 2
49 /* first mbuf in the packet, should always be at offset 0 */
51 struct rte_mbuf mb[DUMMY_MBUF_NUM];
52 uint8_t buf[DUMMY_MBUF_NUM][RTE_MBUF_DEFAULT_BUF_SIZE];
55 #define TEST_FILL_1 0xDEADBEEF
58 #define TEST_MUL_2 -100
60 #define TEST_SHIFT_1 15
61 #define TEST_SHIFT_2 33
63 #define TEST_SHIFT32_MASK (CHAR_BIT * sizeof(uint32_t) - 1)
64 #define TEST_SHIFT64_MASK (CHAR_BIT * sizeof(uint64_t) - 1)
67 #define TEST_JCC_2 -123
68 #define TEST_JCC_3 5678
69 #define TEST_JCC_4 TEST_FILL_1
71 #define TEST_IMM_1 UINT64_MAX
72 #define TEST_IMM_2 ((uint64_t)INT64_MIN)
73 #define TEST_IMM_3 ((uint64_t)INT64_MAX + INT32_MAX)
74 #define TEST_IMM_4 ((uint64_t)UINT32_MAX)
75 #define TEST_IMM_5 ((uint64_t)UINT32_MAX + 1)
77 #define TEST_MEMFROB 0x2a2a2a2a
79 #define STRING_GEEK 0x6B656567
80 #define STRING_WEEK 0x6B656577
82 #define TEST_NETMASK 0xffffff00
83 #define TEST_SUBNET 0xaca80200
85 uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
86 uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
88 uint32_t ip_src_addr = (172U << 24) | (168U << 16) | (2 << 8) | 1;
89 uint32_t ip_dst_addr = (172U << 24) | (168U << 16) | (2 << 8) | 2;
94 struct rte_bpf_prm prm;
95 void (*prepare)(void *);
96 int (*check_result)(uint64_t, const void *);
101 * Compare return value and result data with expected ones.
102 * Report a failure if they don't match.
105 cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
106 const void *exp_res, const void *ret_res, size_t res_sz)
111 if (exp_rc != ret_rc) {
112 printf("%s@%d: invalid return value, expected: 0x%" PRIx64
113 ",result: 0x%" PRIx64 "\n",
114 func, __LINE__, exp_rc, ret_rc);
118 if (memcmp(exp_res, ret_res, res_sz) != 0) {
119 printf("%s: invalid value\n", func);
120 rte_memdump(stdout, "expected", exp_res, res_sz);
121 rte_memdump(stdout, "result", ret_res, res_sz);
128 /* store immediate test-cases */
129 static const struct ebpf_insn test_store1_prog[] = {
131 .code = (BPF_ST | BPF_MEM | BPF_B),
132 .dst_reg = EBPF_REG_1,
133 .off = offsetof(struct dummy_offset, u8),
137 .code = (BPF_ST | BPF_MEM | BPF_H),
138 .dst_reg = EBPF_REG_1,
139 .off = offsetof(struct dummy_offset, u16),
143 .code = (BPF_ST | BPF_MEM | BPF_W),
144 .dst_reg = EBPF_REG_1,
145 .off = offsetof(struct dummy_offset, u32),
149 .code = (BPF_ST | BPF_MEM | EBPF_DW),
150 .dst_reg = EBPF_REG_1,
151 .off = offsetof(struct dummy_offset, u64),
156 .code = (BPF_ALU | EBPF_MOV | BPF_K),
157 .dst_reg = EBPF_REG_0,
161 .code = (BPF_JMP | EBPF_EXIT),
166 test_store1_prepare(void *arg)
168 struct dummy_offset *df;
171 memset(df, 0, sizeof(*df));
175 test_store1_check(uint64_t rc, const void *arg)
177 const struct dummy_offset *dft;
178 struct dummy_offset dfe;
182 memset(&dfe, 0, sizeof(dfe));
183 dfe.u64 = (int32_t)TEST_FILL_1;
188 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
191 /* store register test-cases */
192 static const struct ebpf_insn test_store2_prog[] = {
195 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
196 .dst_reg = EBPF_REG_2,
200 .code = (BPF_STX | BPF_MEM | BPF_B),
201 .dst_reg = EBPF_REG_1,
202 .src_reg = EBPF_REG_2,
203 .off = offsetof(struct dummy_offset, u8),
206 .code = (BPF_STX | BPF_MEM | BPF_H),
207 .dst_reg = EBPF_REG_1,
208 .src_reg = EBPF_REG_2,
209 .off = offsetof(struct dummy_offset, u16),
212 .code = (BPF_STX | BPF_MEM | BPF_W),
213 .dst_reg = EBPF_REG_1,
214 .src_reg = EBPF_REG_2,
215 .off = offsetof(struct dummy_offset, u32),
218 .code = (BPF_STX | BPF_MEM | EBPF_DW),
219 .dst_reg = EBPF_REG_1,
220 .src_reg = EBPF_REG_2,
221 .off = offsetof(struct dummy_offset, u64),
225 .code = (BPF_ALU | EBPF_MOV | BPF_K),
226 .dst_reg = EBPF_REG_0,
230 .code = (BPF_JMP | EBPF_EXIT),
234 /* load test-cases */
235 static const struct ebpf_insn test_load1_prog[] = {
238 .code = (BPF_LDX | BPF_MEM | BPF_B),
239 .dst_reg = EBPF_REG_2,
240 .src_reg = EBPF_REG_1,
241 .off = offsetof(struct dummy_offset, u8),
244 .code = (BPF_LDX | BPF_MEM | BPF_H),
245 .dst_reg = EBPF_REG_3,
246 .src_reg = EBPF_REG_1,
247 .off = offsetof(struct dummy_offset, u16),
250 .code = (BPF_LDX | BPF_MEM | BPF_W),
251 .dst_reg = EBPF_REG_4,
252 .src_reg = EBPF_REG_1,
253 .off = offsetof(struct dummy_offset, u32),
256 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
257 .dst_reg = EBPF_REG_0,
258 .src_reg = EBPF_REG_1,
259 .off = offsetof(struct dummy_offset, u64),
263 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
264 .dst_reg = EBPF_REG_0,
265 .src_reg = EBPF_REG_4,
268 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
269 .dst_reg = EBPF_REG_0,
270 .src_reg = EBPF_REG_3,
273 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
274 .dst_reg = EBPF_REG_0,
275 .src_reg = EBPF_REG_2,
278 .code = (BPF_JMP | EBPF_EXIT),
283 test_load1_prepare(void *arg)
285 struct dummy_offset *df;
289 memset(df, 0, sizeof(*df));
290 df->u64 = (int32_t)TEST_FILL_1;
297 test_load1_check(uint64_t rc, const void *arg)
300 const struct dummy_offset *dft;
308 return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
311 /* load immediate test-cases */
312 static const struct ebpf_insn test_ldimm1_prog[] = {
315 .code = (BPF_LD | BPF_IMM | EBPF_DW),
316 .dst_reg = EBPF_REG_0,
317 .imm = (uint32_t)TEST_IMM_1,
320 .imm = TEST_IMM_1 >> 32,
323 .code = (BPF_LD | BPF_IMM | EBPF_DW),
324 .dst_reg = EBPF_REG_3,
325 .imm = (uint32_t)TEST_IMM_2,
328 .imm = TEST_IMM_2 >> 32,
331 .code = (BPF_LD | BPF_IMM | EBPF_DW),
332 .dst_reg = EBPF_REG_5,
333 .imm = (uint32_t)TEST_IMM_3,
336 .imm = TEST_IMM_3 >> 32,
339 .code = (BPF_LD | BPF_IMM | EBPF_DW),
340 .dst_reg = EBPF_REG_7,
341 .imm = (uint32_t)TEST_IMM_4,
344 .imm = TEST_IMM_4 >> 32,
347 .code = (BPF_LD | BPF_IMM | EBPF_DW),
348 .dst_reg = EBPF_REG_9,
349 .imm = (uint32_t)TEST_IMM_5,
352 .imm = TEST_IMM_5 >> 32,
356 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
357 .dst_reg = EBPF_REG_0,
358 .src_reg = EBPF_REG_3,
361 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
362 .dst_reg = EBPF_REG_0,
363 .src_reg = EBPF_REG_5,
366 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
367 .dst_reg = EBPF_REG_0,
368 .src_reg = EBPF_REG_7,
371 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
372 .dst_reg = EBPF_REG_0,
373 .src_reg = EBPF_REG_9,
376 .code = (BPF_JMP | EBPF_EXIT),
381 test_ldimm1_check(uint64_t rc, const void *arg)
395 return cmp_res(__func__, v1, rc, arg, arg, 0);
399 /* alu mul test-cases */
400 static const struct ebpf_insn test_mul1_prog[] = {
403 .code = (BPF_LDX | BPF_MEM | BPF_W),
404 .dst_reg = EBPF_REG_2,
405 .src_reg = EBPF_REG_1,
406 .off = offsetof(struct dummy_vect8, in[0].u32),
409 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
410 .dst_reg = EBPF_REG_3,
411 .src_reg = EBPF_REG_1,
412 .off = offsetof(struct dummy_vect8, in[1].u64),
415 .code = (BPF_LDX | BPF_MEM | BPF_W),
416 .dst_reg = EBPF_REG_4,
417 .src_reg = EBPF_REG_1,
418 .off = offsetof(struct dummy_vect8, in[2].u32),
421 .code = (BPF_ALU | BPF_MUL | BPF_K),
422 .dst_reg = EBPF_REG_2,
426 .code = (EBPF_ALU64 | BPF_MUL | BPF_K),
427 .dst_reg = EBPF_REG_3,
431 .code = (BPF_ALU | BPF_MUL | BPF_X),
432 .dst_reg = EBPF_REG_4,
433 .src_reg = EBPF_REG_2,
436 .code = (EBPF_ALU64 | BPF_MUL | BPF_X),
437 .dst_reg = EBPF_REG_4,
438 .src_reg = EBPF_REG_3,
441 .code = (BPF_STX | BPF_MEM | EBPF_DW),
442 .dst_reg = EBPF_REG_1,
443 .src_reg = EBPF_REG_2,
444 .off = offsetof(struct dummy_vect8, out[0].u64),
447 .code = (BPF_STX | BPF_MEM | EBPF_DW),
448 .dst_reg = EBPF_REG_1,
449 .src_reg = EBPF_REG_3,
450 .off = offsetof(struct dummy_vect8, out[1].u64),
453 .code = (BPF_STX | BPF_MEM | EBPF_DW),
454 .dst_reg = EBPF_REG_1,
455 .src_reg = EBPF_REG_4,
456 .off = offsetof(struct dummy_vect8, out[2].u64),
460 .code = (BPF_ALU | EBPF_MOV | BPF_K),
461 .dst_reg = EBPF_REG_0,
465 .code = (BPF_JMP | EBPF_EXIT),
470 test_mul1_prepare(void *arg)
472 struct dummy_vect8 *dv;
479 memset(dv, 0, sizeof(*dv));
481 dv->in[1].u64 = v << 12 | v >> 6;
486 test_mul1_check(uint64_t rc, const void *arg)
489 const struct dummy_vect8 *dvt;
490 struct dummy_vect8 dve;
493 memset(&dve, 0, sizeof(dve));
499 r2 = (uint32_t)r2 * TEST_MUL_1;
501 r4 = (uint32_t)(r4 * r2);
508 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
511 /* alu shift test-cases */
512 static const struct ebpf_insn test_shift1_prog[] = {
515 .code = (BPF_LDX | BPF_MEM | BPF_W),
516 .dst_reg = EBPF_REG_2,
517 .src_reg = EBPF_REG_1,
518 .off = offsetof(struct dummy_vect8, in[0].u32),
521 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
522 .dst_reg = EBPF_REG_3,
523 .src_reg = EBPF_REG_1,
524 .off = offsetof(struct dummy_vect8, in[1].u64),
527 .code = (BPF_LDX | BPF_MEM | BPF_W),
528 .dst_reg = EBPF_REG_4,
529 .src_reg = EBPF_REG_1,
530 .off = offsetof(struct dummy_vect8, in[2].u32),
533 .code = (BPF_ALU | BPF_LSH | BPF_K),
534 .dst_reg = EBPF_REG_2,
538 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
539 .dst_reg = EBPF_REG_3,
543 .code = (BPF_STX | BPF_MEM | EBPF_DW),
544 .dst_reg = EBPF_REG_1,
545 .src_reg = EBPF_REG_2,
546 .off = offsetof(struct dummy_vect8, out[0].u64),
549 .code = (BPF_STX | BPF_MEM | EBPF_DW),
550 .dst_reg = EBPF_REG_1,
551 .src_reg = EBPF_REG_3,
552 .off = offsetof(struct dummy_vect8, out[1].u64),
555 .code = (BPF_ALU | BPF_AND | BPF_K),
556 .dst_reg = EBPF_REG_4,
557 .imm = TEST_SHIFT64_MASK,
560 .code = (EBPF_ALU64 | BPF_LSH | BPF_X),
561 .dst_reg = EBPF_REG_3,
562 .src_reg = EBPF_REG_4,
565 .code = (BPF_ALU | BPF_AND | BPF_K),
566 .dst_reg = EBPF_REG_4,
567 .imm = TEST_SHIFT32_MASK,
570 .code = (BPF_ALU | BPF_RSH | BPF_X),
571 .dst_reg = EBPF_REG_2,
572 .src_reg = EBPF_REG_4,
575 .code = (BPF_STX | BPF_MEM | EBPF_DW),
576 .dst_reg = EBPF_REG_1,
577 .src_reg = EBPF_REG_2,
578 .off = offsetof(struct dummy_vect8, out[2].u64),
581 .code = (BPF_STX | BPF_MEM | EBPF_DW),
582 .dst_reg = EBPF_REG_1,
583 .src_reg = EBPF_REG_3,
584 .off = offsetof(struct dummy_vect8, out[3].u64),
587 .code = (BPF_LDX | BPF_MEM | BPF_W),
588 .dst_reg = EBPF_REG_2,
589 .src_reg = EBPF_REG_1,
590 .off = offsetof(struct dummy_vect8, in[0].u32),
593 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
594 .dst_reg = EBPF_REG_3,
595 .src_reg = EBPF_REG_1,
596 .off = offsetof(struct dummy_vect8, in[1].u64),
599 .code = (BPF_LDX | BPF_MEM | BPF_W),
600 .dst_reg = EBPF_REG_4,
601 .src_reg = EBPF_REG_1,
602 .off = offsetof(struct dummy_vect8, in[2].u32),
605 .code = (BPF_ALU | BPF_AND | BPF_K),
606 .dst_reg = EBPF_REG_2,
607 .imm = TEST_SHIFT64_MASK,
610 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
611 .dst_reg = EBPF_REG_3,
612 .src_reg = EBPF_REG_2,
615 .code = (BPF_ALU | BPF_AND | BPF_K),
616 .dst_reg = EBPF_REG_2,
617 .imm = TEST_SHIFT32_MASK,
620 .code = (BPF_ALU | BPF_LSH | BPF_X),
621 .dst_reg = EBPF_REG_4,
622 .src_reg = EBPF_REG_2,
625 .code = (BPF_STX | BPF_MEM | EBPF_DW),
626 .dst_reg = EBPF_REG_1,
627 .src_reg = EBPF_REG_4,
628 .off = offsetof(struct dummy_vect8, out[4].u64),
631 .code = (BPF_STX | BPF_MEM | EBPF_DW),
632 .dst_reg = EBPF_REG_1,
633 .src_reg = EBPF_REG_3,
634 .off = offsetof(struct dummy_vect8, out[5].u64),
638 .code = (BPF_ALU | EBPF_MOV | BPF_K),
639 .dst_reg = EBPF_REG_0,
643 .code = (BPF_JMP | EBPF_EXIT),
648 test_shift1_prepare(void *arg)
650 struct dummy_vect8 *dv;
657 memset(dv, 0, sizeof(*dv));
659 dv->in[1].u64 = v << 12 | v >> 6;
660 dv->in[2].u32 = (-v ^ 5);
664 test_shift1_check(uint64_t rc, const void *arg)
667 const struct dummy_vect8 *dvt;
668 struct dummy_vect8 dve;
671 memset(&dve, 0, sizeof(dve));
677 r2 = (uint32_t)r2 << TEST_SHIFT_1;
678 r3 = (int64_t)r3 >> TEST_SHIFT_2;
683 r4 &= TEST_SHIFT64_MASK;
685 r4 &= TEST_SHIFT32_MASK;
686 r2 = (uint32_t)r2 >> r4;
695 r2 &= TEST_SHIFT64_MASK;
696 r3 = (int64_t)r3 >> r2;
697 r2 &= TEST_SHIFT32_MASK;
698 r4 = (uint32_t)r4 << r2;
703 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
707 static const struct ebpf_insn test_jump1_prog[] = {
710 .code = (BPF_ALU | EBPF_MOV | BPF_K),
711 .dst_reg = EBPF_REG_0,
715 .code = (BPF_LDX | BPF_MEM | BPF_W),
716 .dst_reg = EBPF_REG_2,
717 .src_reg = EBPF_REG_1,
718 .off = offsetof(struct dummy_vect8, in[0].u32),
721 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
722 .dst_reg = EBPF_REG_3,
723 .src_reg = EBPF_REG_1,
724 .off = offsetof(struct dummy_vect8, in[0].u64),
727 .code = (BPF_LDX | BPF_MEM | BPF_W),
728 .dst_reg = EBPF_REG_4,
729 .src_reg = EBPF_REG_1,
730 .off = offsetof(struct dummy_vect8, in[1].u32),
733 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
734 .dst_reg = EBPF_REG_5,
735 .src_reg = EBPF_REG_1,
736 .off = offsetof(struct dummy_vect8, in[1].u64),
739 .code = (BPF_JMP | BPF_JEQ | BPF_K),
740 .dst_reg = EBPF_REG_2,
745 .code = (BPF_JMP | EBPF_JSLE | BPF_K),
746 .dst_reg = EBPF_REG_3,
751 .code = (BPF_JMP | BPF_JGT | BPF_K),
752 .dst_reg = EBPF_REG_4,
757 .code = (BPF_JMP | BPF_JSET | BPF_K),
758 .dst_reg = EBPF_REG_5,
763 .code = (BPF_JMP | EBPF_JNE | BPF_X),
764 .dst_reg = EBPF_REG_2,
765 .src_reg = EBPF_REG_3,
769 .code = (BPF_JMP | EBPF_JSGT | BPF_X),
770 .dst_reg = EBPF_REG_2,
771 .src_reg = EBPF_REG_4,
775 .code = (BPF_JMP | EBPF_JLE | BPF_X),
776 .dst_reg = EBPF_REG_2,
777 .src_reg = EBPF_REG_5,
781 .code = (BPF_JMP | BPF_JSET | BPF_X),
782 .dst_reg = EBPF_REG_3,
783 .src_reg = EBPF_REG_5,
787 .code = (BPF_JMP | EBPF_EXIT),
790 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
791 .dst_reg = EBPF_REG_0,
795 .code = (BPF_JMP | BPF_JA),
799 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
800 .dst_reg = EBPF_REG_0,
804 .code = (BPF_JMP | BPF_JA),
808 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
809 .dst_reg = EBPF_REG_0,
813 .code = (BPF_JMP | BPF_JA),
817 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
818 .dst_reg = EBPF_REG_0,
822 .code = (BPF_JMP | BPF_JA),
826 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
827 .dst_reg = EBPF_REG_0,
831 .code = (BPF_JMP | BPF_JA),
835 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
836 .dst_reg = EBPF_REG_0,
840 .code = (BPF_JMP | BPF_JA),
844 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
845 .dst_reg = EBPF_REG_0,
849 .code = (BPF_JMP | BPF_JA),
853 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
854 .dst_reg = EBPF_REG_0,
858 .code = (BPF_JMP | BPF_JA),
864 test_jump1_prepare(void *arg)
866 struct dummy_vect8 *dv;
874 memset(dv, 0, sizeof(*dv));
877 dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
878 dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
882 test_jump1_check(uint64_t rc, const void *arg)
884 uint64_t r2, r3, r4, r5, rv;
885 const struct dummy_vect8 *dvt;
895 if (r2 == TEST_JCC_1)
897 if ((int64_t)r3 <= TEST_JCC_2)
905 if ((int64_t)r2 > (int64_t)r4)
912 return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
915 /* Jump test case - check ip4_dest in particular subnet */
916 static const struct ebpf_insn test_jump2_prog[] = {
919 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
920 .dst_reg = EBPF_REG_2,
924 .code = (BPF_LDX | BPF_MEM | BPF_H),
925 .dst_reg = EBPF_REG_3,
926 .src_reg = EBPF_REG_1,
930 .code = (BPF_JMP | EBPF_JNE | BPF_K),
931 .dst_reg = EBPF_REG_3,
936 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
937 .dst_reg = EBPF_REG_2,
941 .code = (BPF_LDX | BPF_MEM | BPF_H),
942 .dst_reg = EBPF_REG_3,
943 .src_reg = EBPF_REG_1,
947 .code = (EBPF_ALU64 | BPF_AND | BPF_K),
948 .dst_reg = EBPF_REG_3,
952 .code = (BPF_JMP | EBPF_JNE | BPF_K),
953 .dst_reg = EBPF_REG_3,
958 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
959 .dst_reg = EBPF_REG_1,
960 .src_reg = EBPF_REG_2,
963 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
964 .dst_reg = EBPF_REG_0,
968 .code = (BPF_LDX | BPF_MEM | BPF_W),
969 .dst_reg = EBPF_REG_1,
970 .src_reg = EBPF_REG_1,
974 .code = (BPF_ALU | EBPF_MOV | BPF_K),
975 .dst_reg = EBPF_REG_3,
979 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
980 .dst_reg = EBPF_REG_3,
981 .imm = sizeof(uint32_t) * CHAR_BIT,
984 .code = (BPF_ALU | BPF_AND | BPF_X),
985 .dst_reg = EBPF_REG_1,
986 .src_reg = EBPF_REG_3,
989 .code = (BPF_ALU | EBPF_MOV | BPF_K),
990 .dst_reg = EBPF_REG_3,
994 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
995 .dst_reg = EBPF_REG_3,
996 .imm = sizeof(uint32_t) * CHAR_BIT,
999 .code = (BPF_JMP | BPF_JEQ | BPF_X),
1000 .dst_reg = EBPF_REG_1,
1001 .src_reg = EBPF_REG_3,
1005 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1006 .dst_reg = EBPF_REG_0,
1010 .code = (BPF_JMP | EBPF_EXIT),
1014 /* Preparing a vlan packet */
1016 test_jump2_prepare(void *arg)
1018 struct dummy_net *dn;
1021 memset(dn, 0, sizeof(*dn));
1024 * Initialize ether header.
1026 rte_ether_addr_copy((struct rte_ether_addr *)dst_mac,
1027 &dn->eth_hdr.dst_addr);
1028 rte_ether_addr_copy((struct rte_ether_addr *)src_mac,
1029 &dn->eth_hdr.src_addr);
1030 dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1033 * Initialize vlan header.
1035 dn->vlan_hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1036 dn->vlan_hdr.vlan_tci = 32;
1039 * Initialize IP header.
1041 dn->ip_hdr.version_ihl = 0x45; /*IP_VERSION | IP_HDRLEN*/
1042 dn->ip_hdr.time_to_live = 64; /* IP_DEFTTL */
1043 dn->ip_hdr.next_proto_id = IPPROTO_TCP;
1044 dn->ip_hdr.packet_id = rte_cpu_to_be_16(0x463c);
1045 dn->ip_hdr.total_length = rte_cpu_to_be_16(60);
1046 dn->ip_hdr.src_addr = rte_cpu_to_be_32(ip_src_addr);
1047 dn->ip_hdr.dst_addr = rte_cpu_to_be_32(ip_dst_addr);
1051 test_jump2_check(uint64_t rc, const void *arg)
1053 const struct rte_ether_hdr *eth_hdr = arg;
1054 const struct rte_ipv4_hdr *ipv4_hdr;
1055 const void *next = eth_hdr;
1059 if (eth_hdr->ether_type == htons(0x8100)) {
1060 const struct rte_vlan_hdr *vlan_hdr =
1061 (const void *)(eth_hdr + 1);
1062 eth_type = vlan_hdr->eth_proto;
1063 next = vlan_hdr + 1;
1065 eth_type = eth_hdr->ether_type;
1069 if (eth_type == htons(0x0800)) {
1071 if ((ipv4_hdr->dst_addr & rte_cpu_to_be_32(TEST_NETMASK)) ==
1072 rte_cpu_to_be_32(TEST_SUBNET)) {
1077 return cmp_res(__func__, v, rc, arg, arg, sizeof(arg));
1080 /* alu (add, sub, and, or, xor, neg) test-cases */
1081 static const struct ebpf_insn test_alu1_prog[] = {
1084 .code = (BPF_LDX | BPF_MEM | BPF_W),
1085 .dst_reg = EBPF_REG_2,
1086 .src_reg = EBPF_REG_1,
1087 .off = offsetof(struct dummy_vect8, in[0].u32),
1090 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1091 .dst_reg = EBPF_REG_3,
1092 .src_reg = EBPF_REG_1,
1093 .off = offsetof(struct dummy_vect8, in[0].u64),
1096 .code = (BPF_LDX | BPF_MEM | BPF_W),
1097 .dst_reg = EBPF_REG_4,
1098 .src_reg = EBPF_REG_1,
1099 .off = offsetof(struct dummy_vect8, in[1].u32),
1102 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1103 .dst_reg = EBPF_REG_5,
1104 .src_reg = EBPF_REG_1,
1105 .off = offsetof(struct dummy_vect8, in[1].u64),
1108 .code = (BPF_ALU | BPF_AND | BPF_K),
1109 .dst_reg = EBPF_REG_2,
1113 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1114 .dst_reg = EBPF_REG_3,
1118 .code = (BPF_ALU | BPF_XOR | BPF_K),
1119 .dst_reg = EBPF_REG_4,
1123 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1124 .dst_reg = EBPF_REG_5,
1128 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1129 .dst_reg = EBPF_REG_1,
1130 .src_reg = EBPF_REG_2,
1131 .off = offsetof(struct dummy_vect8, out[0].u64),
1134 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1135 .dst_reg = EBPF_REG_1,
1136 .src_reg = EBPF_REG_3,
1137 .off = offsetof(struct dummy_vect8, out[1].u64),
1140 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1141 .dst_reg = EBPF_REG_1,
1142 .src_reg = EBPF_REG_4,
1143 .off = offsetof(struct dummy_vect8, out[2].u64),
1146 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1147 .dst_reg = EBPF_REG_1,
1148 .src_reg = EBPF_REG_5,
1149 .off = offsetof(struct dummy_vect8, out[3].u64),
1152 .code = (BPF_ALU | BPF_OR | BPF_X),
1153 .dst_reg = EBPF_REG_2,
1154 .src_reg = EBPF_REG_3,
1157 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
1158 .dst_reg = EBPF_REG_3,
1159 .src_reg = EBPF_REG_4,
1162 .code = (BPF_ALU | BPF_SUB | BPF_X),
1163 .dst_reg = EBPF_REG_4,
1164 .src_reg = EBPF_REG_5,
1167 .code = (EBPF_ALU64 | BPF_AND | BPF_X),
1168 .dst_reg = EBPF_REG_5,
1169 .src_reg = EBPF_REG_2,
1172 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1173 .dst_reg = EBPF_REG_1,
1174 .src_reg = EBPF_REG_2,
1175 .off = offsetof(struct dummy_vect8, out[4].u64),
1178 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1179 .dst_reg = EBPF_REG_1,
1180 .src_reg = EBPF_REG_3,
1181 .off = offsetof(struct dummy_vect8, out[5].u64),
1184 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1185 .dst_reg = EBPF_REG_1,
1186 .src_reg = EBPF_REG_4,
1187 .off = offsetof(struct dummy_vect8, out[6].u64),
1190 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1191 .dst_reg = EBPF_REG_1,
1192 .src_reg = EBPF_REG_5,
1193 .off = offsetof(struct dummy_vect8, out[7].u64),
1195 /* return (-r2 + (-r3)) */
1197 .code = (BPF_ALU | BPF_NEG),
1198 .dst_reg = EBPF_REG_2,
1201 .code = (EBPF_ALU64 | BPF_NEG),
1202 .dst_reg = EBPF_REG_3,
1205 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1206 .dst_reg = EBPF_REG_2,
1207 .src_reg = EBPF_REG_3,
1210 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1211 .dst_reg = EBPF_REG_0,
1212 .src_reg = EBPF_REG_2,
1215 .code = (BPF_JMP | EBPF_EXIT),
1220 test_alu1_check(uint64_t rc, const void *arg)
1222 uint64_t r2, r3, r4, r5, rv;
1223 const struct dummy_vect8 *dvt;
1224 struct dummy_vect8 dve;
1227 memset(&dve, 0, sizeof(dve));
1229 r2 = dvt->in[0].u32;
1230 r3 = dvt->in[0].u64;
1231 r4 = dvt->in[1].u32;
1232 r5 = dvt->in[1].u64;
1234 r2 = (uint32_t)r2 & TEST_FILL_1;
1235 r3 |= (int32_t) TEST_FILL_1;
1236 r4 = (uint32_t)r4 ^ TEST_FILL_1;
1237 r5 += (int32_t)TEST_FILL_1;
1239 dve.out[0].u64 = r2;
1240 dve.out[1].u64 = r3;
1241 dve.out[2].u64 = r4;
1242 dve.out[3].u64 = r5;
1244 r2 = (uint32_t)r2 | (uint32_t)r3;
1246 r4 = (uint32_t)r4 - (uint32_t)r5;
1249 dve.out[4].u64 = r2;
1250 dve.out[5].u64 = r3;
1251 dve.out[6].u64 = r4;
1252 dve.out[7].u64 = r5;
1259 return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
1262 /* endianness conversions (BE->LE/LE->BE) test-cases */
1263 static const struct ebpf_insn test_bele1_prog[] = {
1266 .code = (BPF_LDX | BPF_MEM | BPF_H),
1267 .dst_reg = EBPF_REG_2,
1268 .src_reg = EBPF_REG_1,
1269 .off = offsetof(struct dummy_vect8, in[0].u16),
1272 .code = (BPF_LDX | BPF_MEM | BPF_W),
1273 .dst_reg = EBPF_REG_3,
1274 .src_reg = EBPF_REG_1,
1275 .off = offsetof(struct dummy_vect8, in[0].u32),
1278 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1279 .dst_reg = EBPF_REG_4,
1280 .src_reg = EBPF_REG_1,
1281 .off = offsetof(struct dummy_vect8, in[0].u64),
1284 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1285 .dst_reg = EBPF_REG_2,
1286 .imm = sizeof(uint16_t) * CHAR_BIT,
1289 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1290 .dst_reg = EBPF_REG_3,
1291 .imm = sizeof(uint32_t) * CHAR_BIT,
1294 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1295 .dst_reg = EBPF_REG_4,
1296 .imm = sizeof(uint64_t) * CHAR_BIT,
1299 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1300 .dst_reg = EBPF_REG_1,
1301 .src_reg = EBPF_REG_2,
1302 .off = offsetof(struct dummy_vect8, out[0].u64),
1305 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1306 .dst_reg = EBPF_REG_1,
1307 .src_reg = EBPF_REG_3,
1308 .off = offsetof(struct dummy_vect8, out[1].u64),
1311 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1312 .dst_reg = EBPF_REG_1,
1313 .src_reg = EBPF_REG_4,
1314 .off = offsetof(struct dummy_vect8, out[2].u64),
1317 .code = (BPF_LDX | BPF_MEM | BPF_H),
1318 .dst_reg = EBPF_REG_2,
1319 .src_reg = EBPF_REG_1,
1320 .off = offsetof(struct dummy_vect8, in[0].u16),
1323 .code = (BPF_LDX | BPF_MEM | BPF_W),
1324 .dst_reg = EBPF_REG_3,
1325 .src_reg = EBPF_REG_1,
1326 .off = offsetof(struct dummy_vect8, in[0].u32),
1329 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1330 .dst_reg = EBPF_REG_4,
1331 .src_reg = EBPF_REG_1,
1332 .off = offsetof(struct dummy_vect8, in[0].u64),
1335 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1336 .dst_reg = EBPF_REG_2,
1337 .imm = sizeof(uint16_t) * CHAR_BIT,
1340 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1341 .dst_reg = EBPF_REG_3,
1342 .imm = sizeof(uint32_t) * CHAR_BIT,
1345 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1346 .dst_reg = EBPF_REG_4,
1347 .imm = sizeof(uint64_t) * CHAR_BIT,
1350 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1351 .dst_reg = EBPF_REG_1,
1352 .src_reg = EBPF_REG_2,
1353 .off = offsetof(struct dummy_vect8, out[3].u64),
1356 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1357 .dst_reg = EBPF_REG_1,
1358 .src_reg = EBPF_REG_3,
1359 .off = offsetof(struct dummy_vect8, out[4].u64),
1362 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1363 .dst_reg = EBPF_REG_1,
1364 .src_reg = EBPF_REG_4,
1365 .off = offsetof(struct dummy_vect8, out[5].u64),
1369 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1370 .dst_reg = EBPF_REG_0,
1374 .code = (BPF_JMP | EBPF_EXIT),
1379 test_bele1_prepare(void *arg)
1381 struct dummy_vect8 *dv;
1385 memset(dv, 0, sizeof(*dv));
1386 dv->in[0].u64 = rte_rand();
1387 dv->in[0].u32 = dv->in[0].u64;
1388 dv->in[0].u16 = dv->in[0].u64;
1392 test_bele1_check(uint64_t rc, const void *arg)
1394 uint64_t r2, r3, r4;
1395 const struct dummy_vect8 *dvt;
1396 struct dummy_vect8 dve;
1399 memset(&dve, 0, sizeof(dve));
1401 r2 = dvt->in[0].u16;
1402 r3 = dvt->in[0].u32;
1403 r4 = dvt->in[0].u64;
1405 r2 = rte_cpu_to_be_16(r2);
1406 r3 = rte_cpu_to_be_32(r3);
1407 r4 = rte_cpu_to_be_64(r4);
1409 dve.out[0].u64 = r2;
1410 dve.out[1].u64 = r3;
1411 dve.out[2].u64 = r4;
1413 r2 = dvt->in[0].u16;
1414 r3 = dvt->in[0].u32;
1415 r4 = dvt->in[0].u64;
1417 r2 = rte_cpu_to_le_16(r2);
1418 r3 = rte_cpu_to_le_32(r3);
1419 r4 = rte_cpu_to_le_64(r4);
1421 dve.out[3].u64 = r2;
1422 dve.out[4].u64 = r3;
1423 dve.out[5].u64 = r4;
1425 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
1428 /* atomic add test-cases */
1429 static const struct ebpf_insn test_xadd1_prog[] = {
1432 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1433 .dst_reg = EBPF_REG_2,
1437 .code = (BPF_STX | EBPF_XADD | BPF_W),
1438 .dst_reg = EBPF_REG_1,
1439 .src_reg = EBPF_REG_2,
1440 .off = offsetof(struct dummy_offset, u32),
1443 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1444 .dst_reg = EBPF_REG_1,
1445 .src_reg = EBPF_REG_2,
1446 .off = offsetof(struct dummy_offset, u64),
1449 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1450 .dst_reg = EBPF_REG_3,
1454 .code = (BPF_STX | EBPF_XADD | BPF_W),
1455 .dst_reg = EBPF_REG_1,
1456 .src_reg = EBPF_REG_3,
1457 .off = offsetof(struct dummy_offset, u32),
1460 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1461 .dst_reg = EBPF_REG_1,
1462 .src_reg = EBPF_REG_3,
1463 .off = offsetof(struct dummy_offset, u64),
1466 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1467 .dst_reg = EBPF_REG_4,
1471 .code = (BPF_STX | EBPF_XADD | BPF_W),
1472 .dst_reg = EBPF_REG_1,
1473 .src_reg = EBPF_REG_4,
1474 .off = offsetof(struct dummy_offset, u32),
1477 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1478 .dst_reg = EBPF_REG_1,
1479 .src_reg = EBPF_REG_4,
1480 .off = offsetof(struct dummy_offset, u64),
1483 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1484 .dst_reg = EBPF_REG_5,
1488 .code = (BPF_STX | EBPF_XADD | BPF_W),
1489 .dst_reg = EBPF_REG_1,
1490 .src_reg = EBPF_REG_5,
1491 .off = offsetof(struct dummy_offset, u32),
1494 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1495 .dst_reg = EBPF_REG_1,
1496 .src_reg = EBPF_REG_5,
1497 .off = offsetof(struct dummy_offset, u64),
1500 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1501 .dst_reg = EBPF_REG_6,
1505 .code = (BPF_STX | EBPF_XADD | BPF_W),
1506 .dst_reg = EBPF_REG_1,
1507 .src_reg = EBPF_REG_6,
1508 .off = offsetof(struct dummy_offset, u32),
1511 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1512 .dst_reg = EBPF_REG_1,
1513 .src_reg = EBPF_REG_6,
1514 .off = offsetof(struct dummy_offset, u64),
1517 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1518 .dst_reg = EBPF_REG_7,
1522 .code = (BPF_STX | EBPF_XADD | BPF_W),
1523 .dst_reg = EBPF_REG_1,
1524 .src_reg = EBPF_REG_7,
1525 .off = offsetof(struct dummy_offset, u32),
1528 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1529 .dst_reg = EBPF_REG_1,
1530 .src_reg = EBPF_REG_7,
1531 .off = offsetof(struct dummy_offset, u64),
1534 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1535 .dst_reg = EBPF_REG_8,
1539 .code = (BPF_STX | EBPF_XADD | BPF_W),
1540 .dst_reg = EBPF_REG_1,
1541 .src_reg = EBPF_REG_8,
1542 .off = offsetof(struct dummy_offset, u32),
1545 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1546 .dst_reg = EBPF_REG_1,
1547 .src_reg = EBPF_REG_8,
1548 .off = offsetof(struct dummy_offset, u64),
1552 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1553 .dst_reg = EBPF_REG_0,
1557 .code = (BPF_JMP | EBPF_EXIT),
1562 test_xadd1_check(uint64_t rc, const void *arg)
1565 const struct dummy_offset *dft;
1566 struct dummy_offset dfe;
1569 memset(&dfe, 0, sizeof(dfe));
1572 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1573 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1576 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1577 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1579 rv = (int32_t)TEST_FILL_1;
1580 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1581 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1584 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1585 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1588 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1589 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1592 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1593 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1596 __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
1597 __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
1599 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
1602 /* alu div test-cases */
1603 static const struct ebpf_insn test_div1_prog[] = {
1606 .code = (BPF_LDX | BPF_MEM | BPF_W),
1607 .dst_reg = EBPF_REG_2,
1608 .src_reg = EBPF_REG_1,
1609 .off = offsetof(struct dummy_vect8, in[0].u32),
1612 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1613 .dst_reg = EBPF_REG_3,
1614 .src_reg = EBPF_REG_1,
1615 .off = offsetof(struct dummy_vect8, in[1].u64),
1618 .code = (BPF_LDX | BPF_MEM | BPF_W),
1619 .dst_reg = EBPF_REG_4,
1620 .src_reg = EBPF_REG_1,
1621 .off = offsetof(struct dummy_vect8, in[2].u32),
1624 .code = (BPF_ALU | BPF_DIV | BPF_K),
1625 .dst_reg = EBPF_REG_2,
1629 .code = (EBPF_ALU64 | BPF_MOD | BPF_K),
1630 .dst_reg = EBPF_REG_3,
1634 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1635 .dst_reg = EBPF_REG_2,
1639 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1640 .dst_reg = EBPF_REG_3,
1644 .code = (BPF_ALU | BPF_MOD | BPF_X),
1645 .dst_reg = EBPF_REG_4,
1646 .src_reg = EBPF_REG_2,
1649 .code = (EBPF_ALU64 | BPF_DIV | BPF_X),
1650 .dst_reg = EBPF_REG_4,
1651 .src_reg = EBPF_REG_3,
1654 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1655 .dst_reg = EBPF_REG_1,
1656 .src_reg = EBPF_REG_2,
1657 .off = offsetof(struct dummy_vect8, out[0].u64),
1660 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1661 .dst_reg = EBPF_REG_1,
1662 .src_reg = EBPF_REG_3,
1663 .off = offsetof(struct dummy_vect8, out[1].u64),
1666 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1667 .dst_reg = EBPF_REG_1,
1668 .src_reg = EBPF_REG_4,
1669 .off = offsetof(struct dummy_vect8, out[2].u64),
1671 /* check that we can handle division by zero gracefully. */
1673 .code = (BPF_LDX | BPF_MEM | BPF_W),
1674 .dst_reg = EBPF_REG_2,
1675 .src_reg = EBPF_REG_1,
1676 .off = offsetof(struct dummy_vect8, in[3].u32),
1679 .code = (BPF_ALU | BPF_DIV | BPF_X),
1680 .dst_reg = EBPF_REG_4,
1681 .src_reg = EBPF_REG_2,
1685 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1686 .dst_reg = EBPF_REG_0,
1690 .code = (BPF_JMP | EBPF_EXIT),
1695 test_div1_check(uint64_t rc, const void *arg)
1697 uint64_t r2, r3, r4;
1698 const struct dummy_vect8 *dvt;
1699 struct dummy_vect8 dve;
1702 memset(&dve, 0, sizeof(dve));
1704 r2 = dvt->in[0].u32;
1705 r3 = dvt->in[1].u64;
1706 r4 = dvt->in[2].u32;
1708 r2 = (uint32_t)r2 / TEST_MUL_1;
1712 r4 = (uint32_t)(r4 % r2);
1715 dve.out[0].u64 = r2;
1716 dve.out[1].u64 = r3;
1717 dve.out[2].u64 = r4;
1720 * in the test prog we attempted to divide by zero.
1721 * so return value should return 0.
1723 return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
1726 /* call test-cases */
/*
 * eBPF program for the first external-call test: copy the u32/u64
 * fields of the input dummy_offset onto the stack, call dummy_func1()
 * with pointers to them, then return the reloaded u64 + u32 sum.
 * NOTE(review): per-instruction braces and the .off/.imm stack offsets
 * were lost in extraction; the lines below are the surviving fields.
 */
static const struct ebpf_insn test_call1_prog[] = {

	/* R2 = arg->u32 */
	.code = (BPF_LDX | BPF_MEM | BPF_W),
	.dst_reg = EBPF_REG_2,
	.src_reg = EBPF_REG_1,
	.off = offsetof(struct dummy_offset, u32),
	/* R3 = arg->u64 */
	.code = (BPF_LDX | BPF_MEM | EBPF_DW),
	.dst_reg = EBPF_REG_3,
	.src_reg = EBPF_REG_1,
	.off = offsetof(struct dummy_offset, u64),
	/* spill both values below the frame pointer (R10) */
	.code = (BPF_STX | BPF_MEM | BPF_W),
	.dst_reg = EBPF_REG_10,
	.src_reg = EBPF_REG_2,
	.code = (BPF_STX | BPF_MEM | EBPF_DW),
	.dst_reg = EBPF_REG_10,
	.src_reg = EBPF_REG_3,
	/* R2/R3 = stack addresses of the spilled slots */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_2,
	.src_reg = EBPF_REG_10,
	.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
	.dst_reg = EBPF_REG_2,
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_3,
	.src_reg = EBPF_REG_10,
	.code = (EBPF_ALU64 | BPF_SUB | BPF_K),
	.dst_reg = EBPF_REG_3,
	/* call dummy_func1(arg, &v32, &v64) */
	.code = (BPF_JMP | EBPF_CALL),
	/* reload the values the callee may have updated */
	.code = (BPF_LDX | BPF_MEM | BPF_W),
	.dst_reg = EBPF_REG_2,
	.src_reg = EBPF_REG_10,
	.code = (BPF_LDX | BPF_MEM | EBPF_DW),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_10,
	/* R0 = v64 + v32; return it */
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_2,
	.code = (BPF_JMP | EBPF_EXIT),
/*
 * External helper invoked from test_call1_prog: reads fields of the
 * dummy_offset pointed to by p and updates *v32/*v64 in place.
 * NOTE(review): the function body past the local declaration was lost
 * in extraction — restore from upstream.
 */
dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
	const struct dummy_offset *dv;
/*
 * Replay test_call1_prog natively (same helper, same arithmetic) and
 * compare the result with what the eBPF run returned.
 * NOTE(review): the v32/v64 declarations and the 'dv = arg;' /
 * 'v64 += v32;' statements were lost in extraction.
 */
test_call1_check(uint64_t rc, const void *arg)
	const struct dummy_offset *dv;

	dummy_func1(arg, &v32, &v64);

	return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
/*
 * External symbol table for test_call1: exposes dummy_func1 to the
 * verifier as a function of (ptr dummy_offset, ptr u32, ptr u64).
 * NOTE(review): the .func sub-struct braces and .nb_args were lost in
 * extraction.
 */
static const struct rte_bpf_xsym test_call1_xsym[] = {
	.name = RTE_STR(dummy_func1),
	.type = RTE_BPF_XTYPE_FUNC,
	.val = (void *)dummy_func1,
	/* arg 0: pointer to the whole input dummy_offset */
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	/* arg 1: pointer to a u32 scratch value */
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(uint32_t),
	/* arg 2: pointer to a u64 scratch value */
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(uint64_t),
/*
 * eBPF program for the second call test: pass two stack-allocated
 * dummy_offset buffers to dummy_func2(), then sum selected fields of
 * what the callee wrote back (a.u64 + a.u32 + b.u16 + b.u8).
 * NOTE(review): per-instruction braces were lost in extraction.
 */
static const struct ebpf_insn test_call2_prog[] = {

	/* R1 = R10 - sizeof(struct dummy_offset): first buffer */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_10,
	.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
	.dst_reg = EBPF_REG_1,
	.imm = -(int32_t)sizeof(struct dummy_offset),
	/* R2 = R10 - 2 * sizeof(struct dummy_offset): second buffer */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_2,
	.src_reg = EBPF_REG_10,
	.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
	.dst_reg = EBPF_REG_2,
	.imm = -2 * (int32_t)sizeof(struct dummy_offset),
	/* call dummy_func2(&a, &b) */
	.code = (BPF_JMP | EBPF_CALL),
	/* R0 = a.u64 + a.u32 + b.u16 + b.u8 */
	.code = (BPF_LDX | BPF_MEM | EBPF_DW),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_10,
	.off = -(int32_t)(sizeof(struct dummy_offset) -
		offsetof(struct dummy_offset, u64)),
	.code = (BPF_LDX | BPF_MEM | BPF_W),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_10,
	.off = -(int32_t)(sizeof(struct dummy_offset) -
		offsetof(struct dummy_offset, u32)),
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_1,
	.code = (BPF_LDX | BPF_MEM | BPF_H),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_10,
	.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
		offsetof(struct dummy_offset, u16)),
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_1,
	.code = (BPF_LDX | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_10,
	.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
		offsetof(struct dummy_offset, u8)),
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_1,
	.code = (BPF_JMP | EBPF_EXIT),
/*
 * External helper for test_call2: fills both dummy_offset buffers with
 * known values the program then sums.
 * NOTE(review): return type line and the whole body were lost in
 * extraction — restore from upstream.
 */
dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
/*
 * Recompute dummy_func2's expected field sum and compare with rc.
 * NOTE(review): the declaration of 'v' was lost in extraction.
 */
test_call2_check(uint64_t rc, const void *arg)
	struct dummy_offset a, b;

	/* same fills the eBPF program triggered through its call */
	dummy_func2(&a, &b);
	v = a.u64 + a.u32 + b.u16 + b.u8;

	return cmp_res(__func__, v, rc, arg, arg, 0);
/* external symbol table for test_call2: dummy_func2(ptr, ptr) */
static const struct rte_bpf_xsym test_call2_xsym[] = {
	.name = RTE_STR(dummy_func2),
	.type = RTE_BPF_XTYPE_FUNC,
	.val = (void *)dummy_func2,
	/* both args: writable pointer to one dummy_offset buffer */
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
/*
 * eBPF program for the third call test: dummy_func3() returns a
 * pointer into the input structure; load all four fields through that
 * returned pointer and return their sum.
 */
static const struct ebpf_insn test_call3_prog[] = {

	/* R0 = dummy_func3(R1) */
	.code = (BPF_JMP | EBPF_CALL),
	/* load u8/u16/u32/u64 through the returned pointer */
	.code = (BPF_LDX | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_2,
	.src_reg = EBPF_REG_0,
	.off = offsetof(struct dummy_offset, u8),
	.code = (BPF_LDX | BPF_MEM | BPF_H),
	.dst_reg = EBPF_REG_3,
	.src_reg = EBPF_REG_0,
	.off = offsetof(struct dummy_offset, u16),
	.code = (BPF_LDX | BPF_MEM | BPF_W),
	.dst_reg = EBPF_REG_4,
	.src_reg = EBPF_REG_0,
	.off = offsetof(struct dummy_offset, u32),
	.code = (BPF_LDX | BPF_MEM | EBPF_DW),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_0,
	.off = offsetof(struct dummy_offset, u64),
	/* R0 = u64 + u32 + u16 + u8 */
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_4,
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_3,
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_2,
	.code = (BPF_JMP | EBPF_EXIT),
2029 static const struct dummy_offset *
2030 dummy_func3(const struct dummy_vect8 *p)
2032 return &p->in[RTE_DIM(p->in) - 1];
/*
 * Seed the slot dummy_func3() will return so the eBPF program reads a
 * known value. NOTE(review): the 'pv = arg;' assignment appears to
 * have been lost in extraction.
 */
test_call3_prepare(void *arg)
	struct dummy_vect8 *pv;
	struct dummy_offset *df;

	/* cast away const: the test intentionally writes the target slot */
	df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv);

	memset(pv, 0, sizeof(*pv));
	df->u64 = (int32_t)TEST_FILL_1;
/*
 * Expect the sum of the fields stored by test_call3_prepare.
 * NOTE(review): the declaration of 'v', the 'pv = arg;' assignment and
 * the field accumulation were lost in extraction.
 */
test_call3_check(uint64_t rc, const void *arg)
	const struct dummy_vect8 *pv;
	const struct dummy_offset *dft;

	dft = dummy_func3(pv);

	return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv));
/*
 * External symbol table for test_call3: a function taking the whole
 * dummy_vect8 and returning a pointer to one dummy_offset inside it.
 */
static const struct rte_bpf_xsym test_call3_xsym[] = {
	.name = RTE_STR(dummy_func3),
	.type = RTE_BPF_XTYPE_FUNC,
	.val = (void *)dummy_func3,
	/* input argument */
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	/* returned pointer target */
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
/* Test for stack corruption in multiple function calls */
static const struct ebpf_insn test_call4_prog[] = {
	/* store 4 known bytes on the stack */
	.code = (BPF_ST | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_10,
	.code = (BPF_ST | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_10,
	.code = (BPF_ST | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_10,
	.code = (BPF_ST | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_10,
	/* R1 = address of the byte array (R10 minus its size) */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_10,
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
	.dst_reg = EBPF_REG_2,
	.code = (EBPF_ALU64 | BPF_SUB | BPF_X),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_2,
	/* first call: dummy_func4_0() frobs the bytes in place */
	.code = (BPF_JMP | EBPF_CALL),
	/* reload the 4 bytes; stack must survive the call intact */
	.code = (BPF_LDX | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_10,
	.code = (BPF_LDX | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_2,
	.src_reg = EBPF_REG_10,
	.code = (BPF_LDX | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_3,
	.src_reg = EBPF_REG_10,
	.code = (BPF_LDX | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_4,
	.src_reg = EBPF_REG_10,
	/* second call: dummy_func4_1() packs the bytes into a word */
	.code = (BPF_JMP | EBPF_CALL),
	/* undo the frob so the result matches the packed original */
	.code = (EBPF_ALU64 | BPF_XOR | BPF_K),
	.dst_reg = EBPF_REG_0,
	.imm = TEST_MEMFROB,
	.code = (BPF_JMP | EBPF_EXIT),
/* Gathering the bytes together */
static uint32_t
dummy_func4_1(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
{
	/*
	 * Widen to uint32_t before shifting: a uint8_t operand promotes
	 * to (signed) int, so 'a << 24' is undefined behaviour whenever
	 * a >= 0x80 (left shift into the sign bit, C11 6.5.7).
	 */
	return ((uint32_t)a << 24) | ((uint32_t)b << 16) |
		((uint32_t)c << 8) | ((uint32_t)d << 0);
}
/* Implementation of memfrob: XOR the first n bytes of s with 42 */
static uint32_t
dummy_func4_0(uint32_t *s, uint8_t n)
{
	char *b = (char *)s;

	while (n-- != 0)
		*b++ ^= 42;

	return s[0];
}
/*
 * Native replay of test_call4_prog: frob 4 known bytes, repack them,
 * then un-frob the packed word; must match the program's return value.
 * NOTE(review): the declarations of 's' and 'v' were lost in
 * extraction.
 */
test_call4_check(uint64_t rc, const void *arg)
	uint8_t a[4] = {1, 2, 3, 4};

	s = dummy_func4_0((uint32_t *)a, 4);

	s = dummy_func4_1(a[0], a[1], a[2], a[3]);

	v = s ^ TEST_MEMFROB;

	return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
/* external symbol table for test_call4: the two memfrob helpers */
static const struct rte_bpf_xsym test_call4_xsym[] = {
	.name = RTE_STR(dummy_func4_0),
	.type = RTE_BPF_XTYPE_FUNC,
	.val = (void *)dummy_func4_0,
	/* arg 0: pointer to the 4-byte scratch buffer */
	.type = RTE_BPF_ARG_PTR,
	.size = 4 * sizeof(uint8_t),
	/* arg 1: byte count */
	.type = RTE_BPF_ARG_RAW,
	.size = sizeof(uint8_t),
	/* return value: first frobbed word */
	.type = RTE_BPF_ARG_RAW,
	.size = sizeof(uint32_t),

	.name = RTE_STR(dummy_func4_1),
	.type = RTE_BPF_XTYPE_FUNC,
	.val = (void *)dummy_func4_1,
	/* args 0..3: one byte each */
	.type = RTE_BPF_ARG_RAW,
	.size = sizeof(uint8_t),
	.type = RTE_BPF_ARG_RAW,
	.size = sizeof(uint8_t),
	.type = RTE_BPF_ARG_RAW,
	.size = sizeof(uint8_t),
	.type = RTE_BPF_ARG_RAW,
	.size = sizeof(uint8_t),
	/* return value: packed 32-bit word */
	.type = RTE_BPF_ARG_RAW,
	.size = sizeof(uint32_t),
2263 /* string compare test case */
/*
 * eBPF program for the string-compare test: build the strings "geek"
 * and "week" on the stack, call dummy_func5() on (geek, geek) and on
 * (week, geek), and fold both results into the return value.
 * NOTE(review): per-instruction braces and the .imm/.off constants
 * (STRING_GEEK/STRING_WEEK, stack offsets, jump targets) were lost in
 * extraction.
 */
static const struct ebpf_insn test_call5_prog[] = {

	/* store first string on the stack */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
	.dst_reg = EBPF_REG_1,
	.code = (BPF_STX | BPF_MEM | BPF_W),
	.dst_reg = EBPF_REG_10,
	.src_reg = EBPF_REG_1,
	/* R6 = 0: NUL terminator, reused later as "equal" sentinel */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
	.dst_reg = EBPF_REG_6,
	.code = (BPF_STX | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_10,
	.src_reg = EBPF_REG_6,
	.code = (BPF_STX | BPF_MEM | BPF_W),
	.dst_reg = EBPF_REG_10,
	.src_reg = EBPF_REG_6,
	/* store second string on the stack */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
	.dst_reg = EBPF_REG_1,
	.code = (BPF_STX | BPF_MEM | BPF_W),
	.dst_reg = EBPF_REG_10,
	.src_reg = EBPF_REG_1,
	/* R1 = R2 = address of the first string */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_10,
	.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
	.dst_reg = EBPF_REG_1,
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_2,
	.src_reg = EBPF_REG_1,
	/* compare the string with itself: expect 0 */
	.code = (BPF_JMP | EBPF_CALL),
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_0,
	.code = (BPF_ALU | EBPF_MOV | BPF_K),
	.dst_reg = EBPF_REG_0,
	/* truncate the helper result via shift-left/shift-right pair */
	.code = (EBPF_ALU64 | BPF_LSH | BPF_K),
	.dst_reg = EBPF_REG_1,
	.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
	.dst_reg = EBPF_REG_1,
	/* bail out if identical strings did not compare equal */
	.code = (BPF_JMP | EBPF_JNE | BPF_K),
	.dst_reg = EBPF_REG_1,
	/* R1/R2 = addresses of the two different strings */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_10,
	.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
	.dst_reg = EBPF_REG_1,
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_2,
	.src_reg = EBPF_REG_10,
	.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
	.dst_reg = EBPF_REG_2,
	/* compare the differing strings: expect non-zero */
	.code = (BPF_JMP | EBPF_CALL),
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_0,
	.code = (EBPF_ALU64 | BPF_LSH | BPF_K),
	.dst_reg = EBPF_REG_1,
	.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
	.dst_reg = EBPF_REG_1,
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_1,
	/* if the differing strings compared equal, force a 0 result */
	.code = (BPF_JMP | BPF_JEQ | BPF_X),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_6,
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
	.dst_reg = EBPF_REG_0,
	.code = (BPF_JMP | EBPF_EXIT),
/* String comparison implementation, return 0 if equal else difference */
static uint32_t
dummy_func5(const char *s1, const char *s2)
{
	const unsigned char *a = (const unsigned char *)s1;
	const unsigned char *b = (const unsigned char *)s2;

	while (*a != '\0' && *a == *b) {
		a++;
		b++;
	}
	return *a - *b;
}
/*
 * Native replay of the string-compare program.
 * NOTE(review): the local declarations, the a/b string buffers and the
 * statements folding both comparison results into 'v' were lost in
 * extraction.
 */
test_call5_check(uint64_t rc, const void *arg)

	/* equal strings: expect 0 */
	v = dummy_func5(a, a);

	/* differing strings: expect non-zero */
	v = dummy_func5(a, b);

	return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
/* external symbol table for test_call5: dummy_func5(str, str) */
static const struct rte_bpf_xsym test_call5_xsym[] = {
	.name = RTE_STR(dummy_func5),
	.type = RTE_BPF_XTYPE_FUNC,
	.val = (void *)dummy_func5,
	/* both args: pointer to a NUL-terminated string on the stack */
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(char),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(char),
	/* return value: 32-bit difference, 0 when equal */
	.type = RTE_BPF_ARG_RAW,
	.size = sizeof(uint32_t),
2471 /* load mbuf (BPF_ABS/BPF_IND) test-cases */
/*
 * eBPF program exercising BPF_ABS/BPF_IND mbuf loads: parse the IPv4
 * header of the packet in R1 and accumulate several payload bytes
 * into the return value. NOTE(review): per-instruction braces and
 * some .imm shift amounts were lost in extraction.
 */
static const struct ebpf_insn test_ld_mbuf1_prog[] = {

	/* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_6,
	.src_reg = EBPF_REG_1,
	/* load IPv4 version and IHL */
	.code = (BPF_LD | BPF_ABS | BPF_B),
	.imm = offsetof(struct rte_ipv4_hdr, version_ihl),
	/* check IP version */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_2,
	.src_reg = EBPF_REG_0,
	.code = (BPF_ALU | BPF_AND | BPF_K),
	.dst_reg = EBPF_REG_2,
	.code = (BPF_JMP | BPF_JEQ | BPF_K),
	.dst_reg = EBPF_REG_2,
	.imm = IPVERSION << 4,
	/* invalid IP version, return 0 */
	.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_0,
	.code = (BPF_JMP | EBPF_EXIT),
	/* load 3-rd byte of IP data */
	.code = (BPF_ALU | BPF_AND | BPF_K),
	.dst_reg = EBPF_REG_0,
	.imm = RTE_IPV4_HDR_IHL_MASK,
	.code = (BPF_ALU | BPF_LSH | BPF_K),
	.dst_reg = EBPF_REG_0,
	.code = (BPF_LD | BPF_IND | BPF_B),
	.src_reg = EBPF_REG_0,
	/* keep the running sum in R7 */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_7,
	.src_reg = EBPF_REG_0,
	/* load IPv4 src addr */
	.code = (BPF_LD | BPF_ABS | BPF_W),
	.imm = offsetof(struct rte_ipv4_hdr, src_addr),
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_7,
	.src_reg = EBPF_REG_0,
	/* load IPv4 total length */
	.code = (BPF_LD | BPF_ABS | BPF_H),
	.imm = offsetof(struct rte_ipv4_hdr, total_length),
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_8,
	.src_reg = EBPF_REG_0,
	/* load last 4 bytes of IP data */
	.code = (BPF_LD | BPF_IND | BPF_W),
	.src_reg = EBPF_REG_8,
	.imm = -(int32_t)sizeof(uint32_t),
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_7,
	.src_reg = EBPF_REG_0,
	/* load 2 bytes from the middle of IP data */
	.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
	.dst_reg = EBPF_REG_8,
	.code = (BPF_LD | BPF_IND | BPF_H),
	.src_reg = EBPF_REG_8,
	/* return the accumulated sum */
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_7,
	.code = (BPF_JMP | EBPF_EXIT),
/*
 * Initialize a bare rte_mbuf over a caller-supplied buffer and fill
 * its data area with a deterministic byte pattern.
 * NOTE(review): the tail of the parameter list, the mbuf memset,
 * local declarations ('db', 'i') and the fill-loop body were lost in
 * extraction — restore from upstream.
 */
dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len,

	mb->buf_iova = (uintptr_t)buf;
	mb->buf_len = buf_len;
	rte_mbuf_refcnt_set(mb, 1);

	/* set pool pointer to dummy value, test doesn't use it */
	mb->pool = (void *)buf;

	rte_pktmbuf_reset(mb);
	db = (uint8_t *)rte_pktmbuf_append(mb, data_len);

	for (i = 0; i != data_len; i++)
/*
 * Build a two-segment packet: chain two dummy mbufs and write a valid
 * IPv4 header at the start so the BPF_ABS/BPF_IND loads succeed.
 * NOTE(review): the 'dm = arg;' assignment, the iph closing brace and
 * the data-length arguments to dummy_mbuf_prep() were lost in
 * extraction.
 */
test_ld_mbuf1_prepare(void *arg)
	struct dummy_mbuf *dm;
	struct rte_ipv4_hdr *ph;

	const uint32_t plen = 400;
	const struct rte_ipv4_hdr iph = {
		.version_ihl = RTE_IPV4_VHL_DEF,
		.total_length = rte_cpu_to_be_16(plen),
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_RAW,
		.src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
		.dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),

	memset(dm, 0, sizeof(*dm));

	/* split the payload across two chained segments */
	dummy_mbuf_prep(&dm->mb[0], dm->buf[0], sizeof(dm->buf[0]),
	dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[0]),

	rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]);

	/* overlay the IPv4 header at the start of the packet */
	ph = rte_pktmbuf_mtod(dm->mb, typeof(ph));
	memcpy(ph, &iph, sizeof(iph));
/*
 * Native equivalent of test_ld_mbuf1_prog using rte_pktmbuf_read():
 * computes the value the eBPF program is expected to return.
 * NOTE(review): the 'p8'/'n'/'v' declarations, the NULL checks after
 * each read and the final return were lost in extraction.
 */
test_ld_mbuf1(const struct rte_mbuf *pkt)
	const uint16_t *p16;
	const uint32_t *p32;
	struct dummy_offset dof;

	/* load IPv4 version and IHL */
	p8 = rte_pktmbuf_read(pkt,
		offsetof(struct rte_ipv4_hdr, version_ihl), sizeof(*p8),

	/* check IP version */
	if ((p8[0] & 0xf0) != IPVERSION << 4)

	n = (p8[0] & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;

	/* load 3-rd byte of IP data */
	p8 = rte_pktmbuf_read(pkt, n + 3, sizeof(*p8), &dof);

	/* load IPv4 src addr */
	p32 = rte_pktmbuf_read(pkt,
		offsetof(struct rte_ipv4_hdr, src_addr), sizeof(*p32),

	v += rte_be_to_cpu_32(p32[0]);

	/* load IPv4 total length */
	p16 = rte_pktmbuf_read(pkt,
		offsetof(struct rte_ipv4_hdr, total_length), sizeof(*p16),

	n = rte_be_to_cpu_16(p16[0]);

	/* load last 4 bytes of IP data */
	p32 = rte_pktmbuf_read(pkt, n - sizeof(*p32), sizeof(*p32), &dof);

	v += rte_be_to_cpu_32(p32[0]);

	/* load 2 bytes from the middle of IP data */
	p16 = rte_pktmbuf_read(pkt, n / 2, sizeof(*p16), &dof);

	v += rte_be_to_cpu_16(p16[0]);
/*
 * Compare the eBPF result against the native test_ld_mbuf1() value.
 * NOTE(review): the 'dm = arg;' assignment and the declaration of 'v'
 * were lost in extraction.
 */
test_ld_mbuf1_check(uint64_t rc, const void *arg)
	const struct dummy_mbuf *dm;

	v = test_ld_mbuf1(dm->mb);
	return cmp_res(__func__, v, rc, arg, arg, 0);
/*
 * same as ld_mbuf1, but then truncate the mbuf by 1B,
 * so load of last 4B fail.
 * NOTE(review): the 'dm = arg;' assignment was lost in extraction.
 */
test_ld_mbuf2_prepare(void *arg)
	struct dummy_mbuf *dm;

	test_ld_mbuf1_prepare(arg);

	/* drop the final byte so the BPF_IND load of the last 4B faults */
	rte_pktmbuf_trim(dm->mb, 1);
/* a failed mbuf load must make the program gracefully return 0 */
test_ld_mbuf2_check(uint64_t rc, const void *arg)
	return cmp_res(__func__, 0, rc, arg, arg, 0);
2729 /* same as test_ld_mbuf1, but now store intermediate results on the stack */
/*
 * Variant of test_ld_mbuf1_prog that spills each intermediate value
 * to a dummy_offset-shaped slot on the stack and reloads them at the
 * end, exercising BPF_ABS/BPF_IND combined with stack traffic.
 * NOTE(review): per-instruction braces and some .imm values were lost
 * in extraction.
 */
static const struct ebpf_insn test_ld_mbuf3_prog[] = {

	/* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_6,
	.src_reg = EBPF_REG_1,
	/* load IPv4 version and IHL */
	.code = (BPF_LD | BPF_ABS | BPF_B),
	.imm = offsetof(struct rte_ipv4_hdr, version_ihl),
	/* check IP version */
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_2,
	.src_reg = EBPF_REG_0,
	.code = (BPF_ALU | BPF_AND | BPF_K),
	.dst_reg = EBPF_REG_2,
	.code = (BPF_JMP | BPF_JEQ | BPF_K),
	.dst_reg = EBPF_REG_2,
	.imm = IPVERSION << 4,
	/* invalid IP version, return 0 */
	.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_0,
	.code = (BPF_JMP | EBPF_EXIT),
	/* load 3-rd byte of IP data */
	.code = (BPF_ALU | BPF_AND | BPF_K),
	.dst_reg = EBPF_REG_0,
	.imm = RTE_IPV4_HDR_IHL_MASK,
	.code = (BPF_ALU | BPF_LSH | BPF_K),
	.dst_reg = EBPF_REG_0,
	.code = (BPF_LD | BPF_IND | BPF_B),
	.src_reg = EBPF_REG_0,
	/* spill the byte to the u8 slot of the stack dummy_offset */
	.code = (BPF_STX | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_10,
	.src_reg = EBPF_REG_0,
	.off = (int16_t)(offsetof(struct dummy_offset, u8) -
		sizeof(struct dummy_offset)),
	/* load IPv4 src addr */
	.code = (BPF_LD | BPF_ABS | BPF_W),
	.imm = offsetof(struct rte_ipv4_hdr, src_addr),
	/* spill to the u32 slot */
	.code = (BPF_STX | BPF_MEM | BPF_W),
	.dst_reg = EBPF_REG_10,
	.src_reg = EBPF_REG_0,
	.off = (int16_t)(offsetof(struct dummy_offset, u32) -
		sizeof(struct dummy_offset)),
	/* load IPv4 total length */
	.code = (BPF_LD | BPF_ABS | BPF_H),
	.imm = offsetof(struct rte_ipv4_hdr, total_length),
	.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
	.dst_reg = EBPF_REG_8,
	.src_reg = EBPF_REG_0,
	/* load last 4 bytes of IP data */
	.code = (BPF_LD | BPF_IND | BPF_W),
	.src_reg = EBPF_REG_8,
	.imm = -(int32_t)sizeof(uint32_t),
	/* spill to the u64 slot */
	.code = (BPF_STX | BPF_MEM | EBPF_DW),
	.dst_reg = EBPF_REG_10,
	.src_reg = EBPF_REG_0,
	.off = (int16_t)(offsetof(struct dummy_offset, u64) -
		sizeof(struct dummy_offset)),
	/* load 2 bytes from the middle of IP data */
	.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
	.dst_reg = EBPF_REG_8,
	.code = (BPF_LD | BPF_IND | BPF_H),
	.src_reg = EBPF_REG_8,
	/* reload every spilled value and sum them into R0 */
	.code = (BPF_LDX | BPF_MEM | EBPF_DW),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_10,
	.off = (int16_t)(offsetof(struct dummy_offset, u64) -
		sizeof(struct dummy_offset)),
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_1,
	.code = (BPF_LDX | BPF_MEM | BPF_W),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_10,
	.off = (int16_t)(offsetof(struct dummy_offset, u32) -
		sizeof(struct dummy_offset)),
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_1,
	.code = (BPF_LDX | BPF_MEM | BPF_B),
	.dst_reg = EBPF_REG_1,
	.src_reg = EBPF_REG_10,
	.off = (int16_t)(offsetof(struct dummy_offset, u8) -
		sizeof(struct dummy_offset)),
	.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
	.dst_reg = EBPF_REG_0,
	.src_reg = EBPF_REG_1,
	.code = (BPF_JMP | EBPF_EXIT),
2878 /* all bpf test cases */
/*
 * Table of all eBPF test cases consumed by run_test(): each entry
 * names the program, its load parameters (instructions, input-arg
 * descriptor, optional external symbols) and the prepare/check
 * callbacks. NOTE(review): the per-entry braces and the .prm/.nb_xsym
 * sub-structure markers were lost in extraction; the field lines
 * below are the surviving content.
 */
static const struct bpf_test tests[] = {
	.name = "test_store1",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_store1_prog,
	.nb_ins = RTE_DIM(test_store1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.prepare = test_store1_prepare,
	.check_result = test_store1_check,

	.name = "test_store2",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_store2_prog,
	.nb_ins = RTE_DIM(test_store2_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.prepare = test_store1_prepare,
	.check_result = test_store1_check,

	.name = "test_load1",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_load1_prog,
	.nb_ins = RTE_DIM(test_load1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.prepare = test_load1_prepare,
	.check_result = test_load1_check,

	.name = "test_ldimm1",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_ldimm1_prog,
	.nb_ins = RTE_DIM(test_ldimm1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.prepare = test_store1_prepare,
	.check_result = test_ldimm1_check,

	.name = "test_mul1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_mul1_prog,
	.nb_ins = RTE_DIM(test_mul1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_mul1_prepare,
	.check_result = test_mul1_check,

	.name = "test_shift1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_shift1_prog,
	.nb_ins = RTE_DIM(test_shift1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_shift1_prepare,
	.check_result = test_shift1_check,

	.name = "test_jump1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_jump1_prog,
	.nb_ins = RTE_DIM(test_jump1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_jump1_prepare,
	.check_result = test_jump1_check,

	.name = "test_jump2",
	.arg_sz = sizeof(struct dummy_net),
	.ins = test_jump2_prog,
	.nb_ins = RTE_DIM(test_jump2_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_net),
	.prepare = test_jump2_prepare,
	.check_result = test_jump2_check,

	/* reuses test_jump1's input data */
	.name = "test_alu1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_alu1_prog,
	.nb_ins = RTE_DIM(test_alu1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_jump1_prepare,
	.check_result = test_alu1_check,

	.name = "test_bele1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_bele1_prog,
	.nb_ins = RTE_DIM(test_bele1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_bele1_prepare,
	.check_result = test_bele1_check,

	.name = "test_xadd1",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_xadd1_prog,
	.nb_ins = RTE_DIM(test_xadd1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.prepare = test_store1_prepare,
	.check_result = test_xadd1_check,

	/* reuses test_mul1's input data */
	.name = "test_div1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_div1_prog,
	.nb_ins = RTE_DIM(test_div1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_mul1_prepare,
	.check_result = test_div1_check,

	.name = "test_call1",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_call1_prog,
	.nb_ins = RTE_DIM(test_call1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.xsym = test_call1_xsym,
	.nb_xsym = RTE_DIM(test_call1_xsym),
	.prepare = test_load1_prepare,
	.check_result = test_call1_check,
	/* for now don't support function calls on 32 bit platform */
	.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),

	.name = "test_call2",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_call2_prog,
	.nb_ins = RTE_DIM(test_call2_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.xsym = test_call2_xsym,
	.nb_xsym = RTE_DIM(test_call2_xsym),
	.prepare = test_store1_prepare,
	.check_result = test_call2_check,
	/* for now don't support function calls on 32 bit platform */
	.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),

	.name = "test_call3",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_call3_prog,
	.nb_ins = RTE_DIM(test_call3_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.xsym = test_call3_xsym,
	.nb_xsym = RTE_DIM(test_call3_xsym),
	.prepare = test_call3_prepare,
	.check_result = test_call3_check,
	/* for now don't support function calls on 32 bit platform */
	.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),

	.name = "test_call4",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_call4_prog,
	.nb_ins = RTE_DIM(test_call4_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = 2 * sizeof(struct dummy_offset),
	.xsym = test_call4_xsym,
	.nb_xsym = RTE_DIM(test_call4_xsym),
	.prepare = test_store1_prepare,
	.check_result = test_call4_check,
	/* for now don't support function calls on 32 bit platform */
	.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),

	.name = "test_call5",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_call5_prog,
	.nb_ins = RTE_DIM(test_call5_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.xsym = test_call5_xsym,
	.nb_xsym = RTE_DIM(test_call5_xsym),
	.prepare = test_store1_prepare,
	.check_result = test_call5_check,
	/* for now don't support function calls on 32 bit platform */
	.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),

	.name = "test_ld_mbuf1",
	.arg_sz = sizeof(struct dummy_mbuf),
	.ins = test_ld_mbuf1_prog,
	.nb_ins = RTE_DIM(test_ld_mbuf1_prog),
	.type = RTE_BPF_ARG_PTR_MBUF,
	.buf_size = sizeof(struct dummy_mbuf),
	.prepare = test_ld_mbuf1_prepare,
	.check_result = test_ld_mbuf1_check,
	/* mbuf as input argument is not supported on 32 bit platform */
	.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),

	/* same program as ld_mbuf1 but run on a truncated packet */
	.name = "test_ld_mbuf2",
	.arg_sz = sizeof(struct dummy_mbuf),
	.ins = test_ld_mbuf1_prog,
	.nb_ins = RTE_DIM(test_ld_mbuf1_prog),
	.type = RTE_BPF_ARG_PTR_MBUF,
	.buf_size = sizeof(struct dummy_mbuf),
	.prepare = test_ld_mbuf2_prepare,
	.check_result = test_ld_mbuf2_check,
	/* mbuf as input argument is not supported on 32 bit platform */
	.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),

	.name = "test_ld_mbuf3",
	.arg_sz = sizeof(struct dummy_mbuf),
	.ins = test_ld_mbuf3_prog,
	.nb_ins = RTE_DIM(test_ld_mbuf3_prog),
	.type = RTE_BPF_ARG_PTR_MBUF,
	.buf_size = sizeof(struct dummy_mbuf),
	.prepare = test_ld_mbuf1_prepare,
	.check_result = test_ld_mbuf1_check,
	/* mbuf as input argument is not supported on 32 bit platform */
	.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
/*
 * Execute one bpf_test descriptor: load the program, run it through
 * the interpreter and, when a JIT image exists, through the JIT as
 * well, invoking check_result() after each run.
 * NOTE(review): the 'ret'/'rc'/'rv' declarations, the prepare() calls
 * before each run, error-path returns and rc accumulation were lost
 * in extraction.
 */
run_test(const struct bpf_test *tst)
	struct rte_bpf *bpf;
	struct rte_bpf_jit jit;
	uint8_t tbuf[tst->arg_sz];

	printf("%s(%s) start\n", __func__, tst->name);

	bpf = rte_bpf_load(&tst->prm);
		printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
			__func__, __LINE__, rte_errno, strerror(rte_errno));

	/* interpreter pass */
	rc = rte_bpf_exec(bpf, tbuf);
	ret = tst->check_result(rc, tbuf);
		printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
			__func__, __LINE__, tst->name, ret, strerror(ret));

	/* repeat the same test with jit, when possible */
	rte_bpf_get_jit(bpf, &jit);
	if (jit.func != NULL) {

		rc = jit.func(tbuf);
		rv = tst->check_result(rc, tbuf);

			printf("%s@%d: check_result(%s) failed, "
				__func__, __LINE__, tst->name,

	rte_bpf_destroy(bpf);
/*
 * NOTE(review): interior of test_bpf() — the function header and the
 * 'rc' accumulation were lost in extraction. Runs every entry in
 * tests[]; a failure only counts when the case is not marked
 * allow_fail.
 */
	for (i = 0; i != RTE_DIM(tests); i++) {
		rv = run_test(tests + i);
		if (tests[i].allow_fail == 0)
3251 REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);
3253 #ifdef RTE_HAS_LIBPCAP
3254 #include <pcap/pcap.h>
/*
 * Print both the classic BPF program and its eBPF translation.
 * NOTE(review): the return-type line, braces and the cBPF dump call
 * between the two printf()s were lost in extraction.
 */
test_bpf_dump(struct bpf_program *cbf, const struct rte_bpf_prm *prm)
	printf("cBPF program (%u insns)\n", cbf->bf_len);

	printf("\neBPF program (%u insns)\n", prm->nb_ins);
	rte_bpf_dump(stdout, prm->ins, prm->nb_ins);
/*
 * Compile a pcap filter string, convert it to eBPF, load and run it
 * against the given mbuf; the run's return code reports the match.
 * NOTE(review): error-path gotos/returns, the NULL checks after
 * rte_bpf_convert()/rte_bpf_load() and the rte_free(prm) cleanup were
 * lost in extraction. Also note the ",," typo in the bpf_convert
 * error message (runtime string, left untouched here).
 */
test_bpf_match(pcap_t *pcap, const char *str,
	struct rte_mbuf *mb)
	struct bpf_program fcode;
	struct rte_bpf_prm *prm = NULL;
	struct rte_bpf *bpf = NULL;

	if (pcap_compile(pcap, &fcode, str, 1, PCAP_NETMASK_UNKNOWN)) {
		printf("%s@%d: pcap_compile(\"%s\") failed: %s;\n",
			__func__, __LINE__, str, pcap_geterr(pcap));

	prm = rte_bpf_convert(&fcode);
		printf("%s@%d: bpf_convert('%s') failed,, error=%d(%s);\n",
			__func__, __LINE__, str, rte_errno, strerror(rte_errno));

	bpf = rte_bpf_load(prm);
		printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
			__func__, __LINE__, rte_errno, strerror(rte_errno));

	rc = rte_bpf_exec(bpf, mb);
	/* The return code from bpf capture filter is non-zero if matched */

	rte_bpf_destroy(bpf);

	pcap_freecode(&fcode);
3307 /* Basic sanity test can we match a IP packet */
/*
 * Build one broadcast IPv4 packet and verify that filter "ip" matches
 * it while "not ip" does not.
 * NOTE(review): the local 'hdr' struct declaration, initializer
 * closing braces and the return paths were lost in extraction.
 */
test_bpf_filter_sanity(pcap_t *pcap)
	const uint32_t plen = 100;
	struct rte_mbuf mb, *m;
	uint8_t tbuf[RTE_MBUF_DEFAULT_BUF_SIZE];

	struct rte_ether_hdr eth_hdr;
	struct rte_ipv4_hdr ip_hdr;

	dummy_mbuf_prep(&mb, tbuf, sizeof(tbuf), plen);

	/* overlay Ethernet + IPv4 headers at the start of the packet */
	hdr = rte_pktmbuf_mtod(m, typeof(hdr));
	hdr->eth_hdr = (struct rte_ether_hdr) {
		.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4),
	hdr->ip_hdr = (struct rte_ipv4_hdr) {
		.version_ihl = RTE_IPV4_VHL_DEF,
		.total_length = rte_cpu_to_be_16(plen),
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_RAW,
		.src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
		.dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),

	if (test_bpf_match(pcap, "ip", m) != 0) {
		printf("%s@%d: filter \"ip\" doesn't match test data\n",
			__func__, __LINE__);
	if (test_bpf_match(pcap, "not ip", m) == 0) {
		printf("%s@%d: filter \"not ip\" does match test data\n",
			__func__, __LINE__);
3351 * Some sample pcap filter strings from
3352 * https://wiki.wireshark.org/CaptureFilters
/* filter strings below are compiled and converted but never executed */
static const char * const sample_filters[] = {
	"net 192.168.0.0/24",
	"src net 192.168.0.0/24",
	"src net 192.168.0.0 mask 255.255.255.0",
	"dst net 192.168.0.0/24",
	"dst net 192.168.0.0 mask 255.255.255.0",
	"host 192.0.2.1 and not (port 80 or port 25)",
	"host 2001:4b98:db0::8 and not port 80 and not port 25",
	"port not 53 and not arp",
	"(tcp[0:2] > 1500 and tcp[0:2] < 1550) or (tcp[2:2] > 1500 and tcp[2:2] < 1550)",
	"ether proto 0x888e",
	"ether[0] & 1 = 0 and ip[16] >= 224",
	"icmp[icmptype] != icmp-echo and icmp[icmptype] != icmp-echoreply",
	"tcp[tcpflags] & (tcp-syn|tcp-fin) != 0 and not src and dst net 127.0.0.1",
	"not ether dst 01:80:c2:00:00:0e",
	"not broadcast and not multicast",
	"port 80 and tcp[((tcp[12:1] & 0xf0) >> 2):4] = 0x47455420",
	"dst port 135 and tcp port 135 and ip[2:2]==48",
	"icmp[icmptype]==icmp-echo and ip[2:2]==92 and icmp[8:4]==0xAAAAAAAA",
	"dst port 135 or dst port 445 or dst port 1433"
	" and tcp[tcpflags] & (tcp-syn) != 0"
	" and tcp[tcpflags] & (tcp-ack) = 0 and src net 192.168.0.0/24",
	"tcp src port 443 and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4] = 0x18)"
	" and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 1] = 0x03)"
	" and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 2] < 0x04)"
	" and ((ip[2:2] - 4 * (ip[0] & 0x0F) - 4 * ((tcp[12] & 0xF0) >> 4) > 69))",
/*
 * Compile one sample filter string, convert it to eBPF, load it and
 * dump both programs; success means the eBPF image loaded.
 * NOTE(review): the return type, braces, error-path returns and the
 * rte_free(prm) cleanup were lost in extraction. The double comma in
 * the bpf_convert error message is a pre-existing typo in a runtime
 * string, left untouched here.
 */
test_bpf_filter(pcap_t *pcap, const char *s)
	struct bpf_program fcode;
	struct rte_bpf_prm *prm = NULL;
	struct rte_bpf *bpf = NULL;

	if (pcap_compile(pcap, &fcode, s, 1, PCAP_NETMASK_UNKNOWN)) {
		printf("%s@%d: pcap_compile('%s') failed: %s;\n",
			__func__, __LINE__, s, pcap_geterr(pcap));

	prm = rte_bpf_convert(&fcode);
		printf("%s@%d: bpf_convert('%s') failed,, error=%d(%s);\n",
			__func__, __LINE__, s, rte_errno, strerror(rte_errno));

	bpf = rte_bpf_load(prm);
		printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
			__func__, __LINE__, rte_errno, strerror(rte_errno));

	rte_bpf_destroy(bpf);

	printf("%s \"%s\"\n", __func__, s);
	test_bpf_dump(&fcode, prm);

	pcap_freecode(&fcode);
	return (bpf == NULL) ? -1 : 0;
/*
 * Entry point for bpf_convert_autotest: sanity-check filter matching
 * on a real packet, then compile/convert every sample filter string.
 * NOTE(review): the 'rc'/'i'/'pcap' declarations, the NULL check,
 * pcap_close() and the final return were lost in extraction.
 */
test_bpf_convert(void)

	pcap = pcap_open_dead(DLT_EN10MB, 262144);
		printf("pcap_open_dead failed\n");

	rc = test_bpf_filter_sanity(pcap);
	for (i = 0; i < RTE_DIM(sample_filters); i++)
		rc |= test_bpf_filter(pcap, sample_filters[i]);
3449 REGISTER_TEST_COMMAND(bpf_convert_autotest, test_bpf_convert);
3450 #endif /* RTE_HAS_LIBPCAP */