1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
10 #include <rte_memory.h>
11 #include <rte_debug.h>
12 #include <rte_hexdump.h>
13 #include <rte_random.h>
14 #include <rte_byteorder.h>
15 #include <rte_errno.h>
17 #include <rte_ether.h>
23 * Basic functional tests for librte_bpf.
24 * The main procedure - load eBPF program, execute it and
25 * compare results with expected values.
/* NOTE(review): fragment view - these fields belong to two different
 * aggregates whose struct headers are outside this chunk. in[]/out[]
 * appear to be the input/output vectors of the vectorized test buffer;
 * the three headers appear to describe a synthetic VLAN-tagged IPv4
 * packet (see the jump2 prepare routine) - confirm against full file. */
36 struct dummy_offset in[8];
37 struct dummy_offset out[8];
41 struct rte_ether_hdr eth_hdr;
42 struct rte_vlan_hdr vlan_hdr;
43 struct rte_ipv4_hdr ip_hdr;
/* Test operands: fill pattern, mul/shift/jcc constants, and 64-bit
 * immediates chosen to straddle the int32/uint32/int64 boundaries. */
46 #define TEST_FILL_1 0xDEADBEEF
49 #define TEST_MUL_2 -100
51 #define TEST_SHIFT_1 15
/* 33 > 32: a shift count wider than a 32-bit register */
52 #define TEST_SHIFT_2 33
55 #define TEST_JCC_2 -123
56 #define TEST_JCC_3 5678
57 #define TEST_JCC_4 TEST_FILL_1
59 #define TEST_IMM_1 UINT64_MAX
60 #define TEST_IMM_2 ((uint64_t)INT64_MIN)
61 #define TEST_IMM_3 ((uint64_t)INT64_MAX + INT32_MAX)
62 #define TEST_IMM_4 ((uint64_t)UINT32_MAX)
63 #define TEST_IMM_5 ((uint64_t)UINT32_MAX + 1)
65 #define TEST_MEMFROB 0x2a2a2a2a
/* ASCII "geek" / "week" packed little-endian into 32-bit ints */
67 #define STRING_GEEK 0x6B656567
68 #define STRING_WEEK 0x6B656577
/* 0xaca80200 == 172.168.2.0; mask selects the /24 containing the
 * src/dst addresses defined below */
70 #define TEST_NETMASK 0xffffff00
71 #define TEST_SUBNET 0xaca80200
73 uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
74 uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
/* host-order 172.168.2.1 and 172.168.2.2 */
76 uint32_t ip_src_addr = (172U << 24) | (168U << 16) | (2 << 8) | 1;
77 uint32_t ip_dst_addr = (172U << 24) | (168U << 16) | (2 << 8) | 2;
/* NOTE(review): fields of the per-test descriptor (struct header is
 * outside this chunk): BPF load parameters, input-prepare hook, and
 * result-verification hook taking (return value, input buffer). */
82 struct rte_bpf_prm prm;
83 void (*prepare)(void *);
84 int (*check_result)(uint64_t, const void *);
89 * Compare return value and result data with expected ones.
90 * Report a failure if they don't match.
/* NOTE(review): fragment - return type, braces, and the final
 * success/failure return statements are elided from this view. Visible
 * logic: report a mismatched return code, then memcmp() the result
 * buffer against the expected buffer and hexdump both on mismatch. */
93 cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
94 const void *exp_res, const void *ret_res, size_t res_sz)
99 if (exp_rc != ret_rc) {
100 printf("%s@%d: invalid return value, expected: 0x%" PRIx64
101 ",result: 0x%" PRIx64 "\n",
102 func, __LINE__, exp_rc, ret_rc);
106 if (memcmp(exp_res, ret_res, res_sz) != 0) {
107 printf("%s: invalid value\n", func);
108 rte_memdump(stdout, "expected", exp_res, res_sz);
109 rte_memdump(stdout, "result", ret_res, res_sz);
116 /* store immediate test-cases */
/* Program: store an immediate into each field width (u8/u16/u32/u64)
 * of the dummy_offset pointed to by R1, then return.
 * NOTE(review): the .imm value lines are elided from this view; the
 * check routine below implies the fill value is TEST_FILL_1. */
117 static const struct ebpf_insn test_store1_prog[] = {
119 .code = (BPF_ST | BPF_MEM | BPF_B),
120 .dst_reg = EBPF_REG_1,
121 .off = offsetof(struct dummy_offset, u8),
125 .code = (BPF_ST | BPF_MEM | BPF_H),
126 .dst_reg = EBPF_REG_1,
127 .off = offsetof(struct dummy_offset, u16),
131 .code = (BPF_ST | BPF_MEM | BPF_W),
132 .dst_reg = EBPF_REG_1,
133 .off = offsetof(struct dummy_offset, u32),
137 .code = (BPF_ST | BPF_MEM | EBPF_DW),
138 .dst_reg = EBPF_REG_1,
139 .off = offsetof(struct dummy_offset, u64),
/* set return value in R0 and exit */
144 .code = (BPF_ALU | EBPF_MOV | BPF_K),
145 .dst_reg = EBPF_REG_0,
149 .code = (BPF_JMP | EBPF_EXIT),
/* zero the input buffer before the program runs */
154 test_store1_prepare(void *arg)
156 struct dummy_offset *df;
159 memset(df, 0, sizeof(*df));
/* build the expected dummy_offset image and compare with the result */
163 test_store1_check(uint64_t rc, const void *arg)
165 const struct dummy_offset *dft;
166 struct dummy_offset dfe;
170 memset(&dfe, 0, sizeof(dfe));
/* (int32_t) cast: the 32-bit immediate is sign-extended to 64 bit */
171 dfe.u64 = (int32_t)TEST_FILL_1;
176 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
179 /* store register test-cases */
/* Program: load an immediate into R2, then store R2 into each field
 * width (u8/u16/u32/u64) of the dummy_offset pointed to by R1.
 * NOTE(review): .imm value lines are elided from this view. */
180 static const struct ebpf_insn test_store2_prog[] = {
183 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
184 .dst_reg = EBPF_REG_2,
188 .code = (BPF_STX | BPF_MEM | BPF_B),
189 .dst_reg = EBPF_REG_1,
190 .src_reg = EBPF_REG_2,
191 .off = offsetof(struct dummy_offset, u8),
194 .code = (BPF_STX | BPF_MEM | BPF_H),
195 .dst_reg = EBPF_REG_1,
196 .src_reg = EBPF_REG_2,
197 .off = offsetof(struct dummy_offset, u16),
200 .code = (BPF_STX | BPF_MEM | BPF_W),
201 .dst_reg = EBPF_REG_1,
202 .src_reg = EBPF_REG_2,
203 .off = offsetof(struct dummy_offset, u32),
206 .code = (BPF_STX | BPF_MEM | EBPF_DW),
207 .dst_reg = EBPF_REG_1,
208 .src_reg = EBPF_REG_2,
209 .off = offsetof(struct dummy_offset, u64),
/* set return value in R0 and exit */
213 .code = (BPF_ALU | EBPF_MOV | BPF_K),
214 .dst_reg = EBPF_REG_0,
218 .code = (BPF_JMP | EBPF_EXIT),
222 /* load test-cases */
/* Program: load each field width of the input dummy_offset (R1) into
 * separate registers, sum them into R0, and return the sum. */
223 static const struct ebpf_insn test_load1_prog[] = {
226 .code = (BPF_LDX | BPF_MEM | BPF_B),
227 .dst_reg = EBPF_REG_2,
228 .src_reg = EBPF_REG_1,
229 .off = offsetof(struct dummy_offset, u8),
232 .code = (BPF_LDX | BPF_MEM | BPF_H),
233 .dst_reg = EBPF_REG_3,
234 .src_reg = EBPF_REG_1,
235 .off = offsetof(struct dummy_offset, u16),
238 .code = (BPF_LDX | BPF_MEM | BPF_W),
239 .dst_reg = EBPF_REG_4,
240 .src_reg = EBPF_REG_1,
241 .off = offsetof(struct dummy_offset, u32),
244 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
245 .dst_reg = EBPF_REG_0,
246 .src_reg = EBPF_REG_1,
247 .off = offsetof(struct dummy_offset, u64),
/* R0 = u64 + u32 + u16 + u8 */
251 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
252 .dst_reg = EBPF_REG_0,
253 .src_reg = EBPF_REG_4,
256 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
257 .dst_reg = EBPF_REG_0,
258 .src_reg = EBPF_REG_3,
261 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
262 .dst_reg = EBPF_REG_0,
263 .src_reg = EBPF_REG_2,
266 .code = (BPF_JMP | EBPF_EXIT),
/* fill the input buffer; only the u64 assignment is visible here,
 * remaining field initializers are elided from this view */
271 test_load1_prepare(void *arg)
273 struct dummy_offset *df;
277 memset(df, 0, sizeof(*df));
278 df->u64 = (int32_t)TEST_FILL_1;
/* expected return: sum of the input fields (computation of 'v' is
 * elided from this view) */
285 test_load1_check(uint64_t rc, const void *arg)
288 const struct dummy_offset *dft;
296 return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
299 /* load immediate test-cases */
/* Program: five 2-slot 64-bit load-immediate instructions (low 32 bits
 * in the first slot's .imm, high 32 bits in the second slot's .imm),
 * then sum all loaded values into R0 and return the sum. */
300 static const struct ebpf_insn test_ldimm1_prog[] = {
303 .code = (BPF_LD | BPF_IMM | EBPF_DW),
304 .dst_reg = EBPF_REG_0,
305 .imm = (uint32_t)TEST_IMM_1,
308 .imm = TEST_IMM_1 >> 32,
311 .code = (BPF_LD | BPF_IMM | EBPF_DW),
312 .dst_reg = EBPF_REG_3,
313 .imm = (uint32_t)TEST_IMM_2,
316 .imm = TEST_IMM_2 >> 32,
319 .code = (BPF_LD | BPF_IMM | EBPF_DW),
320 .dst_reg = EBPF_REG_5,
321 .imm = (uint32_t)TEST_IMM_3,
324 .imm = TEST_IMM_3 >> 32,
327 .code = (BPF_LD | BPF_IMM | EBPF_DW),
328 .dst_reg = EBPF_REG_7,
329 .imm = (uint32_t)TEST_IMM_4,
332 .imm = TEST_IMM_4 >> 32,
335 .code = (BPF_LD | BPF_IMM | EBPF_DW),
336 .dst_reg = EBPF_REG_9,
337 .imm = (uint32_t)TEST_IMM_5,
340 .imm = TEST_IMM_5 >> 32,
/* R0 = IMM_1 + IMM_2 + IMM_3 + IMM_4 + IMM_5 */
344 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
345 .dst_reg = EBPF_REG_0,
346 .src_reg = EBPF_REG_3,
349 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
350 .dst_reg = EBPF_REG_0,
351 .src_reg = EBPF_REG_5,
354 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
355 .dst_reg = EBPF_REG_0,
356 .src_reg = EBPF_REG_7,
359 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
360 .dst_reg = EBPF_REG_0,
361 .src_reg = EBPF_REG_9,
364 .code = (BPF_JMP | EBPF_EXIT),
/* recompute the expected sum on the host side (computation of 'v1' is
 * elided from this view); res_sz 0 => only the return value matters */
369 test_ldimm1_check(uint64_t rc, const void *arg)
383 return cmp_res(__func__, v1, rc, arg, arg, 0);
387 /* alu mul test-cases */
/* Program: exercise 32-bit and 64-bit multiply in both immediate and
 * register forms; results are written back into out[0..2].u64.
 * NOTE(review): the .imm multiplier lines are elided; the check routine
 * implies the 32-bit multiplier is TEST_MUL_1. */
388 static const struct ebpf_insn test_mul1_prog[] = {
391 .code = (BPF_LDX | BPF_MEM | BPF_W),
392 .dst_reg = EBPF_REG_2,
393 .src_reg = EBPF_REG_1,
394 .off = offsetof(struct dummy_vect8, in[0].u32),
397 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
398 .dst_reg = EBPF_REG_3,
399 .src_reg = EBPF_REG_1,
400 .off = offsetof(struct dummy_vect8, in[1].u64),
403 .code = (BPF_LDX | BPF_MEM | BPF_W),
404 .dst_reg = EBPF_REG_4,
405 .src_reg = EBPF_REG_1,
406 .off = offsetof(struct dummy_vect8, in[2].u32),
/* 32-bit mul by immediate */
409 .code = (BPF_ALU | BPF_MUL | BPF_K),
410 .dst_reg = EBPF_REG_2,
/* 64-bit mul by immediate */
414 .code = (EBPF_ALU64 | BPF_MUL | BPF_K),
415 .dst_reg = EBPF_REG_3,
/* 32-bit mul by register */
419 .code = (BPF_ALU | BPF_MUL | BPF_X),
420 .dst_reg = EBPF_REG_4,
421 .src_reg = EBPF_REG_2,
/* 64-bit mul by register */
424 .code = (EBPF_ALU64 | BPF_MUL | BPF_X),
425 .dst_reg = EBPF_REG_4,
426 .src_reg = EBPF_REG_3,
429 .code = (BPF_STX | BPF_MEM | EBPF_DW),
430 .dst_reg = EBPF_REG_1,
431 .src_reg = EBPF_REG_2,
432 .off = offsetof(struct dummy_vect8, out[0].u64),
435 .code = (BPF_STX | BPF_MEM | EBPF_DW),
436 .dst_reg = EBPF_REG_1,
437 .src_reg = EBPF_REG_3,
438 .off = offsetof(struct dummy_vect8, out[1].u64),
441 .code = (BPF_STX | BPF_MEM | EBPF_DW),
442 .dst_reg = EBPF_REG_1,
443 .src_reg = EBPF_REG_4,
444 .off = offsetof(struct dummy_vect8, out[2].u64),
/* set return value in R0 and exit */
448 .code = (BPF_ALU | EBPF_MOV | BPF_K),
449 .dst_reg = EBPF_REG_0,
453 .code = (BPF_JMP | EBPF_EXIT),
/* fill the input vector with pseudo-random derived values (source of
 * 'v' is elided from this view) */
458 test_mul1_prepare(void *arg)
460 struct dummy_vect8 *dv;
467 memset(dv, 0, sizeof(*dv));
469 dv->in[1].u64 = v << 12 | v >> 6;
/* replay the multiplications on the host and compare out[] images */
474 test_mul1_check(uint64_t rc, const void *arg)
477 const struct dummy_vect8 *dvt;
478 struct dummy_vect8 dve;
481 memset(&dve, 0, sizeof(dve));
/* (uint32_t) casts mirror the 32-bit BPF_ALU (non-64) semantics */
487 r2 = (uint32_t)r2 * TEST_MUL_1;
489 r4 = (uint32_t)(r4 * r2);
496 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
499 /* alu shift test-cases */
/* Program: exercise LSH/RSH/ARSH in 32- and 64-bit widths, with both
 * immediate and register shift counts; results land in out[0..5].u64.
 * NOTE(review): the .imm shift-count lines are elided; the check
 * routine implies TEST_SHIFT_1/TEST_SHIFT_2. */
500 static const struct ebpf_insn test_shift1_prog[] = {
503 .code = (BPF_LDX | BPF_MEM | BPF_W),
504 .dst_reg = EBPF_REG_2,
505 .src_reg = EBPF_REG_1,
506 .off = offsetof(struct dummy_vect8, in[0].u32),
509 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
510 .dst_reg = EBPF_REG_3,
511 .src_reg = EBPF_REG_1,
512 .off = offsetof(struct dummy_vect8, in[1].u64),
515 .code = (BPF_LDX | BPF_MEM | BPF_W),
516 .dst_reg = EBPF_REG_4,
517 .src_reg = EBPF_REG_1,
518 .off = offsetof(struct dummy_vect8, in[2].u32),
/* 32-bit left shift by immediate */
521 .code = (BPF_ALU | BPF_LSH | BPF_K),
522 .dst_reg = EBPF_REG_2,
/* 64-bit arithmetic right shift by immediate */
526 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
527 .dst_reg = EBPF_REG_3,
531 .code = (BPF_STX | BPF_MEM | EBPF_DW),
532 .dst_reg = EBPF_REG_1,
533 .src_reg = EBPF_REG_2,
534 .off = offsetof(struct dummy_vect8, out[0].u64),
537 .code = (BPF_STX | BPF_MEM | EBPF_DW),
538 .dst_reg = EBPF_REG_1,
539 .src_reg = EBPF_REG_3,
540 .off = offsetof(struct dummy_vect8, out[1].u64),
/* register-count variants */
543 .code = (BPF_ALU | BPF_RSH | BPF_X),
544 .dst_reg = EBPF_REG_2,
545 .src_reg = EBPF_REG_4,
548 .code = (EBPF_ALU64 | BPF_LSH | BPF_X),
549 .dst_reg = EBPF_REG_3,
550 .src_reg = EBPF_REG_4,
553 .code = (BPF_STX | BPF_MEM | EBPF_DW),
554 .dst_reg = EBPF_REG_1,
555 .src_reg = EBPF_REG_2,
556 .off = offsetof(struct dummy_vect8, out[2].u64),
559 .code = (BPF_STX | BPF_MEM | EBPF_DW),
560 .dst_reg = EBPF_REG_1,
561 .src_reg = EBPF_REG_3,
562 .off = offsetof(struct dummy_vect8, out[3].u64),
/* reload fresh inputs for the masked-count variants */
565 .code = (BPF_LDX | BPF_MEM | BPF_W),
566 .dst_reg = EBPF_REG_2,
567 .src_reg = EBPF_REG_1,
568 .off = offsetof(struct dummy_vect8, in[0].u32),
571 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
572 .dst_reg = EBPF_REG_3,
573 .src_reg = EBPF_REG_1,
574 .off = offsetof(struct dummy_vect8, in[1].u64),
577 .code = (BPF_LDX | BPF_MEM | BPF_W),
578 .dst_reg = EBPF_REG_4,
579 .src_reg = EBPF_REG_1,
580 .off = offsetof(struct dummy_vect8, in[2].u32),
/* mask the count to [0, 63] before the 64-bit arithmetic shift */
583 .code = (BPF_ALU | BPF_AND | BPF_K),
584 .dst_reg = EBPF_REG_2,
585 .imm = sizeof(uint64_t) * CHAR_BIT - 1,
588 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
589 .dst_reg = EBPF_REG_3,
590 .src_reg = EBPF_REG_2,
/* mask the count to [0, 31] before the 32-bit left shift */
593 .code = (BPF_ALU | BPF_AND | BPF_K),
594 .dst_reg = EBPF_REG_2,
595 .imm = sizeof(uint32_t) * CHAR_BIT - 1,
598 .code = (BPF_ALU | BPF_LSH | BPF_X),
599 .dst_reg = EBPF_REG_4,
600 .src_reg = EBPF_REG_2,
603 .code = (BPF_STX | BPF_MEM | EBPF_DW),
604 .dst_reg = EBPF_REG_1,
605 .src_reg = EBPF_REG_4,
606 .off = offsetof(struct dummy_vect8, out[4].u64),
609 .code = (BPF_STX | BPF_MEM | EBPF_DW),
610 .dst_reg = EBPF_REG_1,
611 .src_reg = EBPF_REG_3,
612 .off = offsetof(struct dummy_vect8, out[5].u64),
/* set return value in R0 and exit */
616 .code = (BPF_ALU | EBPF_MOV | BPF_K),
617 .dst_reg = EBPF_REG_0,
621 .code = (BPF_JMP | EBPF_EXIT),
/* fill the input vector (source of 'v' elided from this view) */
626 test_shift1_prepare(void *arg)
628 struct dummy_vect8 *dv;
635 memset(dv, 0, sizeof(*dv));
637 dv->in[1].u64 = v << 12 | v >> 6;
638 dv->in[2].u32 = (-v ^ 5);
/* replay the shift sequence on the host and compare out[] images */
642 test_shift1_check(uint64_t rc, const void *arg)
645 const struct dummy_vect8 *dvt;
646 struct dummy_vect8 dve;
649 memset(&dve, 0, sizeof(dve));
655 r2 = (uint32_t)r2 << TEST_SHIFT_1;
/* (int64_t) cast mirrors ARSH (sign-propagating) semantics */
656 r3 = (int64_t)r3 >> TEST_SHIFT_2;
661 r2 = (uint32_t)r2 >> r4;
/* masked-count variants mirror the BPF_AND instructions above */
671 r2 &= sizeof(uint64_t) * CHAR_BIT - 1;
672 r3 = (int64_t)r3 >> r2;
673 r2 &= sizeof(uint32_t) * CHAR_BIT - 1;
674 r4 = (uint32_t)r4 << r2;
679 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
/* Jump test: a ladder of conditional branches (immediate and register
 * forms); each taken branch lands on an OR-into-R0 block that sets a
 * distinct bit, so the final R0 encodes exactly which branches fired.
 * NOTE(review): the .imm/.off operand lines are elided from this view;
 * the check routine implies TEST_JCC_1..4 are the comparands. */
683 static const struct ebpf_insn test_jump1_prog[] = {
686 .code = (BPF_ALU | EBPF_MOV | BPF_K),
687 .dst_reg = EBPF_REG_0,
691 .code = (BPF_LDX | BPF_MEM | BPF_W),
692 .dst_reg = EBPF_REG_2,
693 .src_reg = EBPF_REG_1,
694 .off = offsetof(struct dummy_vect8, in[0].u32),
697 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
698 .dst_reg = EBPF_REG_3,
699 .src_reg = EBPF_REG_1,
700 .off = offsetof(struct dummy_vect8, in[0].u64),
703 .code = (BPF_LDX | BPF_MEM | BPF_W),
704 .dst_reg = EBPF_REG_4,
705 .src_reg = EBPF_REG_1,
706 .off = offsetof(struct dummy_vect8, in[1].u32),
709 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
710 .dst_reg = EBPF_REG_5,
711 .src_reg = EBPF_REG_1,
712 .off = offsetof(struct dummy_vect8, in[1].u64),
/* immediate-comparand branches */
715 .code = (BPF_JMP | BPF_JEQ | BPF_K),
716 .dst_reg = EBPF_REG_2,
721 .code = (BPF_JMP | EBPF_JSLE | BPF_K),
722 .dst_reg = EBPF_REG_3,
727 .code = (BPF_JMP | BPF_JGT | BPF_K),
728 .dst_reg = EBPF_REG_4,
733 .code = (BPF_JMP | BPF_JSET | BPF_K),
734 .dst_reg = EBPF_REG_5,
/* register-comparand branches */
739 .code = (BPF_JMP | EBPF_JNE | BPF_X),
740 .dst_reg = EBPF_REG_2,
741 .src_reg = EBPF_REG_3,
745 .code = (BPF_JMP | EBPF_JSGT | BPF_X),
746 .dst_reg = EBPF_REG_2,
747 .src_reg = EBPF_REG_4,
751 .code = (BPF_JMP | EBPF_JLE | BPF_X),
752 .dst_reg = EBPF_REG_2,
753 .src_reg = EBPF_REG_5,
757 .code = (BPF_JMP | BPF_JSET | BPF_X),
758 .dst_reg = EBPF_REG_3,
759 .src_reg = EBPF_REG_5,
763 .code = (BPF_JMP | EBPF_EXIT),
/* branch targets: OR a flag bit into R0, then jump back (JA) */
766 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
767 .dst_reg = EBPF_REG_0,
771 .code = (BPF_JMP | BPF_JA),
775 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
776 .dst_reg = EBPF_REG_0,
780 .code = (BPF_JMP | BPF_JA),
784 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
785 .dst_reg = EBPF_REG_0,
789 .code = (BPF_JMP | BPF_JA),
793 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
794 .dst_reg = EBPF_REG_0,
798 .code = (BPF_JMP | BPF_JA),
802 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
803 .dst_reg = EBPF_REG_0,
807 .code = (BPF_JMP | BPF_JA),
811 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
812 .dst_reg = EBPF_REG_0,
816 .code = (BPF_JMP | BPF_JA),
820 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
821 .dst_reg = EBPF_REG_0,
825 .code = (BPF_JMP | BPF_JA),
829 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
830 .dst_reg = EBPF_REG_0,
834 .code = (BPF_JMP | BPF_JA),
/* derive the two input words from values v1/v2 (sources elided) */
840 test_jump1_prepare(void *arg)
842 struct dummy_vect8 *dv;
850 memset(dv, 0, sizeof(*dv));
853 dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
854 dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
/* recompute the expected flag mask 'rv' by evaluating the same
 * predicates on the host (several predicate lines elided here) */
858 test_jump1_check(uint64_t rc, const void *arg)
860 uint64_t r2, r3, r4, r5, rv;
861 const struct dummy_vect8 *dvt;
871 if (r2 == TEST_JCC_1)
873 if ((int64_t)r3 <= TEST_JCC_2)
881 if ((int64_t)r2 > (int64_t)r4)
888 return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
891 /* Jump test case - check ip4_dest in particular subnet */
/* Program: parse an (optionally VLAN-tagged) Ethernet frame in R1,
 * locate the IPv4 header, mask dst_addr with the network mask and
 * compare it against the expected subnet; R0 reports match/no-match.
 * NOTE(review): the .imm/.off operand lines are elided from this view;
 * the check routine implies TEST_NETMASK/TEST_SUBNET comparands. */
892 static const struct ebpf_insn test_jump2_prog[] = {
/* R2 = assumed L2 header size; adjusted below if a VLAN tag is seen */
895 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
896 .dst_reg = EBPF_REG_2,
/* load ether_type */
900 .code = (BPF_LDX | BPF_MEM | BPF_H),
901 .dst_reg = EBPF_REG_3,
902 .src_reg = EBPF_REG_1,
906 .code = (BPF_JMP | EBPF_JNE | BPF_K),
907 .dst_reg = EBPF_REG_3,
/* VLAN path: larger L2 offset, re-load the inner ether_type */
912 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
913 .dst_reg = EBPF_REG_2,
917 .code = (BPF_LDX | BPF_MEM | BPF_H),
918 .dst_reg = EBPF_REG_3,
919 .src_reg = EBPF_REG_1,
923 .code = (EBPF_ALU64 | BPF_AND | BPF_K),
924 .dst_reg = EBPF_REG_3,
928 .code = (BPF_JMP | EBPF_JNE | BPF_K),
929 .dst_reg = EBPF_REG_3,
/* R1 += L2 header size => R1 now points at the IPv4 header */
934 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
935 .dst_reg = EBPF_REG_1,
936 .src_reg = EBPF_REG_2,
939 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
940 .dst_reg = EBPF_REG_0,
/* load the 32-bit destination address */
944 .code = (BPF_LDX | BPF_MEM | BPF_W),
945 .dst_reg = EBPF_REG_1,
946 .src_reg = EBPF_REG_1,
/* build the big-endian netmask in R3 via the EBPF_END byte-swap */
950 .code = (BPF_ALU | EBPF_MOV | BPF_K),
951 .dst_reg = EBPF_REG_3,
955 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
956 .dst_reg = EBPF_REG_3,
957 .imm = sizeof(uint32_t) * CHAR_BIT,
/* dst_addr &= netmask */
960 .code = (BPF_ALU | BPF_AND | BPF_X),
961 .dst_reg = EBPF_REG_1,
962 .src_reg = EBPF_REG_3,
/* build the big-endian subnet in R3 and compare */
965 .code = (BPF_ALU | EBPF_MOV | BPF_K),
966 .dst_reg = EBPF_REG_3,
970 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
971 .dst_reg = EBPF_REG_3,
972 .imm = sizeof(uint32_t) * CHAR_BIT,
975 .code = (BPF_JMP | BPF_JEQ | BPF_X),
976 .dst_reg = EBPF_REG_1,
977 .src_reg = EBPF_REG_3,
981 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
982 .dst_reg = EBPF_REG_0,
986 .code = (BPF_JMP | EBPF_EXIT),
990 /* Preparing a vlan packet */
992 test_jump2_prepare(void *arg)
994 struct dummy_net *dn;
997 memset(dn, 0, sizeof(*dn));
1000 * Initialize ether header.
1002 rte_ether_addr_copy((struct rte_ether_addr *)dst_mac,
1003 &dn->eth_hdr.d_addr);
1004 rte_ether_addr_copy((struct rte_ether_addr *)src_mac,
1005 &dn->eth_hdr.s_addr);
1006 dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1009 * Initialize vlan header.
1011 dn->vlan_hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1012 dn->vlan_hdr.vlan_tci = 32;
1015 * Initialize IP header.
1017 dn->ip_hdr.version_ihl = 0x45; /*IP_VERSION | IP_HDRLEN*/
1018 dn->ip_hdr.time_to_live = 64; /* IP_DEFTTL */
1019 dn->ip_hdr.next_proto_id = IPPROTO_TCP;
1020 dn->ip_hdr.packet_id = rte_cpu_to_be_16(0x463c);
1021 dn->ip_hdr.total_length = rte_cpu_to_be_16(60);
1022 dn->ip_hdr.src_addr = rte_cpu_to_be_32(ip_src_addr);
1023 dn->ip_hdr.dst_addr = rte_cpu_to_be_32(ip_dst_addr);
/* host-side reference parser: same VLAN-aware walk, same subnet test.
 * 0x8100 is the 802.1Q TPID and 0x0800 the IPv4 ethertype. */
1027 test_jump2_check(uint64_t rc, const void *arg)
1029 const struct rte_ether_hdr *eth_hdr = arg;
1030 const struct rte_ipv4_hdr *ipv4_hdr;
1031 const void *next = eth_hdr;
1035 if (eth_hdr->ether_type == htons(0x8100)) {
1036 const struct rte_vlan_hdr *vlan_hdr =
1037 (const void *)(eth_hdr + 1);
1038 eth_type = vlan_hdr->eth_proto;
1039 next = vlan_hdr + 1;
1041 eth_type = eth_hdr->ether_type;
1045 if (eth_type == htons(0x0800)) {
1047 if ((ipv4_hdr->dst_addr & rte_cpu_to_be_32(TEST_NETMASK)) ==
1048 rte_cpu_to_be_32(TEST_SUBNET)) {
/* sizeof(arg) is the pointer size; the buffers compared are identical
 * (arg vs arg), so only the return value 'v' is really checked */
1053 return cmp_res(__func__, v, rc, arg, arg, sizeof(arg));
1056 /* alu (add, sub, and, or, xor, neg) test-cases */
/* Program: exercise AND/OR/XOR/ADD with immediates, then OR/XOR/SUB/AND
 * between registers, storing intermediate results into out[0..7];
 * finally return (-r2) + (-r3) via the NEG instructions.
 * NOTE(review): the .imm operand lines are elided; the check routine
 * implies TEST_FILL_1 is the immediate operand. */
1057 static const struct ebpf_insn test_alu1_prog[] = {
1060 .code = (BPF_LDX | BPF_MEM | BPF_W),
1061 .dst_reg = EBPF_REG_2,
1062 .src_reg = EBPF_REG_1,
1063 .off = offsetof(struct dummy_vect8, in[0].u32),
1066 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1067 .dst_reg = EBPF_REG_3,
1068 .src_reg = EBPF_REG_1,
1069 .off = offsetof(struct dummy_vect8, in[0].u64),
1072 .code = (BPF_LDX | BPF_MEM | BPF_W),
1073 .dst_reg = EBPF_REG_4,
1074 .src_reg = EBPF_REG_1,
1075 .off = offsetof(struct dummy_vect8, in[1].u32),
1078 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1079 .dst_reg = EBPF_REG_5,
1080 .src_reg = EBPF_REG_1,
1081 .off = offsetof(struct dummy_vect8, in[1].u64),
/* immediate forms: 32-bit AND, 64-bit OR, 32-bit XOR, 64-bit ADD */
1084 .code = (BPF_ALU | BPF_AND | BPF_K),
1085 .dst_reg = EBPF_REG_2,
1089 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1090 .dst_reg = EBPF_REG_3,
1094 .code = (BPF_ALU | BPF_XOR | BPF_K),
1095 .dst_reg = EBPF_REG_4,
1099 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1100 .dst_reg = EBPF_REG_5,
1104 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1105 .dst_reg = EBPF_REG_1,
1106 .src_reg = EBPF_REG_2,
1107 .off = offsetof(struct dummy_vect8, out[0].u64),
1110 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1111 .dst_reg = EBPF_REG_1,
1112 .src_reg = EBPF_REG_3,
1113 .off = offsetof(struct dummy_vect8, out[1].u64),
1116 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1117 .dst_reg = EBPF_REG_1,
1118 .src_reg = EBPF_REG_4,
1119 .off = offsetof(struct dummy_vect8, out[2].u64),
1122 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1123 .dst_reg = EBPF_REG_1,
1124 .src_reg = EBPF_REG_5,
1125 .off = offsetof(struct dummy_vect8, out[3].u64),
/* register forms: 32-bit OR, 64-bit XOR, 32-bit SUB, 64-bit AND */
1128 .code = (BPF_ALU | BPF_OR | BPF_X),
1129 .dst_reg = EBPF_REG_2,
1130 .src_reg = EBPF_REG_3,
1133 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
1134 .dst_reg = EBPF_REG_3,
1135 .src_reg = EBPF_REG_4,
1138 .code = (BPF_ALU | BPF_SUB | BPF_X),
1139 .dst_reg = EBPF_REG_4,
1140 .src_reg = EBPF_REG_5,
1143 .code = (EBPF_ALU64 | BPF_AND | BPF_X),
1144 .dst_reg = EBPF_REG_5,
1145 .src_reg = EBPF_REG_2,
1148 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1149 .dst_reg = EBPF_REG_1,
1150 .src_reg = EBPF_REG_2,
1151 .off = offsetof(struct dummy_vect8, out[4].u64),
1154 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1155 .dst_reg = EBPF_REG_1,
1156 .src_reg = EBPF_REG_3,
1157 .off = offsetof(struct dummy_vect8, out[5].u64),
1160 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1161 .dst_reg = EBPF_REG_1,
1162 .src_reg = EBPF_REG_4,
1163 .off = offsetof(struct dummy_vect8, out[6].u64),
1166 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1167 .dst_reg = EBPF_REG_1,
1168 .src_reg = EBPF_REG_5,
1169 .off = offsetof(struct dummy_vect8, out[7].u64),
1171 /* return (-r2 + (-r3)) */
1173 .code = (BPF_ALU | BPF_NEG),
1174 .dst_reg = EBPF_REG_2,
1177 .code = (EBPF_ALU64 | BPF_NEG),
1178 .dst_reg = EBPF_REG_3,
1181 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1182 .dst_reg = EBPF_REG_2,
1183 .src_reg = EBPF_REG_3,
1186 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1187 .dst_reg = EBPF_REG_0,
1188 .src_reg = EBPF_REG_2,
1191 .code = (BPF_JMP | EBPF_EXIT),
/* replay the ALU sequence on the host; (uint32_t)/(int32_t) casts
 * mirror 32-bit op truncation and immediate sign-extension */
1196 test_alu1_check(uint64_t rc, const void *arg)
1198 uint64_t r2, r3, r4, r5, rv;
1199 const struct dummy_vect8 *dvt;
1200 struct dummy_vect8 dve;
1203 memset(&dve, 0, sizeof(dve));
1205 r2 = dvt->in[0].u32;
1206 r3 = dvt->in[0].u64;
1207 r4 = dvt->in[1].u32;
1208 r5 = dvt->in[1].u64;
1210 r2 = (uint32_t)r2 & TEST_FILL_1;
1211 r3 |= (int32_t) TEST_FILL_1;
1212 r4 = (uint32_t)r4 ^ TEST_FILL_1;
1213 r5 += (int32_t)TEST_FILL_1;
1215 dve.out[0].u64 = r2;
1216 dve.out[1].u64 = r3;
1217 dve.out[2].u64 = r4;
1218 dve.out[3].u64 = r5;
1220 r2 = (uint32_t)r2 | (uint32_t)r3;
1222 r4 = (uint32_t)r4 - (uint32_t)r5;
1225 dve.out[4].u64 = r2;
1226 dve.out[5].u64 = r3;
1227 dve.out[6].u64 = r4;
1228 dve.out[7].u64 = r5;
1235 return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
1238 /* endianness conversions (BE->LE/LE->BE) test-cases */
/* Program: apply EBPF_END TO_BE then TO_LE conversions to 16/32/64-bit
 * values loaded from in[0], storing the six results in out[0..5]. */
1239 static const struct ebpf_insn test_bele1_prog[] = {
1242 .code = (BPF_LDX | BPF_MEM | BPF_H),
1243 .dst_reg = EBPF_REG_2,
1244 .src_reg = EBPF_REG_1,
1245 .off = offsetof(struct dummy_vect8, in[0].u16),
1248 .code = (BPF_LDX | BPF_MEM | BPF_W),
1249 .dst_reg = EBPF_REG_3,
1250 .src_reg = EBPF_REG_1,
1251 .off = offsetof(struct dummy_vect8, in[0].u32),
1254 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1255 .dst_reg = EBPF_REG_4,
1256 .src_reg = EBPF_REG_1,
1257 .off = offsetof(struct dummy_vect8, in[0].u64),
/* TO_BE conversions; .imm selects the operand width in bits */
1260 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1261 .dst_reg = EBPF_REG_2,
1262 .imm = sizeof(uint16_t) * CHAR_BIT,
1265 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1266 .dst_reg = EBPF_REG_3,
1267 .imm = sizeof(uint32_t) * CHAR_BIT,
1270 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1271 .dst_reg = EBPF_REG_4,
1272 .imm = sizeof(uint64_t) * CHAR_BIT,
1275 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1276 .dst_reg = EBPF_REG_1,
1277 .src_reg = EBPF_REG_2,
1278 .off = offsetof(struct dummy_vect8, out[0].u64),
1281 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1282 .dst_reg = EBPF_REG_1,
1283 .src_reg = EBPF_REG_3,
1284 .off = offsetof(struct dummy_vect8, out[1].u64),
1287 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1288 .dst_reg = EBPF_REG_1,
1289 .src_reg = EBPF_REG_4,
1290 .off = offsetof(struct dummy_vect8, out[2].u64),
/* reload fresh inputs for the TO_LE pass */
1293 .code = (BPF_LDX | BPF_MEM | BPF_H),
1294 .dst_reg = EBPF_REG_2,
1295 .src_reg = EBPF_REG_1,
1296 .off = offsetof(struct dummy_vect8, in[0].u16),
1299 .code = (BPF_LDX | BPF_MEM | BPF_W),
1300 .dst_reg = EBPF_REG_3,
1301 .src_reg = EBPF_REG_1,
1302 .off = offsetof(struct dummy_vect8, in[0].u32),
1305 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1306 .dst_reg = EBPF_REG_4,
1307 .src_reg = EBPF_REG_1,
1308 .off = offsetof(struct dummy_vect8, in[0].u64),
1311 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1312 .dst_reg = EBPF_REG_2,
1313 .imm = sizeof(uint16_t) * CHAR_BIT,
1316 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1317 .dst_reg = EBPF_REG_3,
1318 .imm = sizeof(uint32_t) * CHAR_BIT,
1321 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1322 .dst_reg = EBPF_REG_4,
1323 .imm = sizeof(uint64_t) * CHAR_BIT,
1326 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1327 .dst_reg = EBPF_REG_1,
1328 .src_reg = EBPF_REG_2,
1329 .off = offsetof(struct dummy_vect8, out[3].u64),
1332 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1333 .dst_reg = EBPF_REG_1,
1334 .src_reg = EBPF_REG_3,
1335 .off = offsetof(struct dummy_vect8, out[4].u64),
1338 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1339 .dst_reg = EBPF_REG_1,
1340 .src_reg = EBPF_REG_4,
1341 .off = offsetof(struct dummy_vect8, out[5].u64),
/* set return value in R0 and exit */
1345 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1346 .dst_reg = EBPF_REG_0,
1350 .code = (BPF_JMP | EBPF_EXIT),
/* fill in[0] with one random u64 and its truncations */
1355 test_bele1_prepare(void *arg)
1357 struct dummy_vect8 *dv;
1361 memset(dv, 0, sizeof(*dv));
1362 dv->in[0].u64 = rte_rand();
1363 dv->in[0].u32 = dv->in[0].u64;
1364 dv->in[0].u16 = dv->in[0].u64;
/* host-side reference: rte_cpu_to_be_* / rte_cpu_to_le_* mirror the
 * eBPF EBPF_TO_BE / EBPF_TO_LE semantics */
1368 test_bele1_check(uint64_t rc, const void *arg)
1370 uint64_t r2, r3, r4;
1371 const struct dummy_vect8 *dvt;
1372 struct dummy_vect8 dve;
1375 memset(&dve, 0, sizeof(dve));
1377 r2 = dvt->in[0].u16;
1378 r3 = dvt->in[0].u32;
1379 r4 = dvt->in[0].u64;
1381 r2 = rte_cpu_to_be_16(r2);
1382 r3 = rte_cpu_to_be_32(r3);
1383 r4 = rte_cpu_to_be_64(r4);
1385 dve.out[0].u64 = r2;
1386 dve.out[1].u64 = r3;
1387 dve.out[2].u64 = r4;
1389 r2 = dvt->in[0].u16;
1390 r3 = dvt->in[0].u32;
1391 r4 = dvt->in[0].u64;
1393 r2 = rte_cpu_to_le_16(r2);
1394 r3 = rte_cpu_to_le_32(r3);
1395 r4 = rte_cpu_to_le_64(r4);
1397 dve.out[3].u64 = r2;
1398 dve.out[4].u64 = r3;
1399 dve.out[5].u64 = r4;
1401 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
1404 /* atomic add test-cases */
/* Program: seven rounds of EBPF_XADD (atomic fetch-add) into the same
 * u32 and u64 fields of the dummy_offset at R1, each round with a
 * different addend loaded into R2..R8.
 * NOTE(review): the .imm addend lines are elided from this view. */
1405 static const struct ebpf_insn test_xadd1_prog[] = {
1408 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1409 .dst_reg = EBPF_REG_2,
1413 .code = (BPF_STX | EBPF_XADD | BPF_W),
1414 .dst_reg = EBPF_REG_1,
1415 .src_reg = EBPF_REG_2,
1416 .off = offsetof(struct dummy_offset, u32),
1419 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1420 .dst_reg = EBPF_REG_1,
1421 .src_reg = EBPF_REG_2,
1422 .off = offsetof(struct dummy_offset, u64),
1425 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1426 .dst_reg = EBPF_REG_3,
1430 .code = (BPF_STX | EBPF_XADD | BPF_W),
1431 .dst_reg = EBPF_REG_1,
1432 .src_reg = EBPF_REG_3,
1433 .off = offsetof(struct dummy_offset, u32),
1436 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1437 .dst_reg = EBPF_REG_1,
1438 .src_reg = EBPF_REG_3,
1439 .off = offsetof(struct dummy_offset, u64),
1442 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1443 .dst_reg = EBPF_REG_4,
1447 .code = (BPF_STX | EBPF_XADD | BPF_W),
1448 .dst_reg = EBPF_REG_1,
1449 .src_reg = EBPF_REG_4,
1450 .off = offsetof(struct dummy_offset, u32),
1453 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1454 .dst_reg = EBPF_REG_1,
1455 .src_reg = EBPF_REG_4,
1456 .off = offsetof(struct dummy_offset, u64),
1459 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1460 .dst_reg = EBPF_REG_5,
1464 .code = (BPF_STX | EBPF_XADD | BPF_W),
1465 .dst_reg = EBPF_REG_1,
1466 .src_reg = EBPF_REG_5,
1467 .off = offsetof(struct dummy_offset, u32),
1470 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1471 .dst_reg = EBPF_REG_1,
1472 .src_reg = EBPF_REG_5,
1473 .off = offsetof(struct dummy_offset, u64),
1476 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1477 .dst_reg = EBPF_REG_6,
1481 .code = (BPF_STX | EBPF_XADD | BPF_W),
1482 .dst_reg = EBPF_REG_1,
1483 .src_reg = EBPF_REG_6,
1484 .off = offsetof(struct dummy_offset, u32),
1487 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1488 .dst_reg = EBPF_REG_1,
1489 .src_reg = EBPF_REG_6,
1490 .off = offsetof(struct dummy_offset, u64),
1493 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1494 .dst_reg = EBPF_REG_7,
1498 .code = (BPF_STX | EBPF_XADD | BPF_W),
1499 .dst_reg = EBPF_REG_1,
1500 .src_reg = EBPF_REG_7,
1501 .off = offsetof(struct dummy_offset, u32),
1504 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1505 .dst_reg = EBPF_REG_1,
1506 .src_reg = EBPF_REG_7,
1507 .off = offsetof(struct dummy_offset, u64),
1510 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1511 .dst_reg = EBPF_REG_8,
1515 .code = (BPF_STX | EBPF_XADD | BPF_W),
1516 .dst_reg = EBPF_REG_1,
1517 .src_reg = EBPF_REG_8,
1518 .off = offsetof(struct dummy_offset, u32),
1521 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1522 .dst_reg = EBPF_REG_1,
1523 .src_reg = EBPF_REG_8,
1524 .off = offsetof(struct dummy_offset, u64),
/* set return value in R0 and exit */
1528 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1529 .dst_reg = EBPF_REG_0,
1533 .code = (BPF_JMP | EBPF_EXIT),
/* host-side reference: apply the same seven adds with rte_atomic ops
 * and compare the resulting dummy_offset image (most 'rv =' addend
 * assignments are elided from this view) */
1538 test_xadd1_check(uint64_t rc, const void *arg)
1541 const struct dummy_offset *dft;
1542 struct dummy_offset dfe;
1545 memset(&dfe, 0, sizeof(dfe));
1548 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1549 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1552 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1553 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1555 rv = (int32_t)TEST_FILL_1;
1556 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1557 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1560 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1561 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1564 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1565 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1568 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1569 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1572 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1573 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1575 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
1578 /* alu div test-cases */
/* Program: exercise 32/64-bit DIV and MOD in immediate and register
 * forms (with OR-immediates making the divisors non-zero first), then
 * deliberately divide by a zero loaded from in[3].u32 to verify
 * graceful handling; results stored in out[0..2].
 * NOTE(review): the .imm divisor lines are elided from this view. */
1579 static const struct ebpf_insn test_div1_prog[] = {
1582 .code = (BPF_LDX | BPF_MEM | BPF_W),
1583 .dst_reg = EBPF_REG_2,
1584 .src_reg = EBPF_REG_1,
1585 .off = offsetof(struct dummy_vect8, in[0].u32),
1588 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1589 .dst_reg = EBPF_REG_3,
1590 .src_reg = EBPF_REG_1,
1591 .off = offsetof(struct dummy_vect8, in[1].u64),
1594 .code = (BPF_LDX | BPF_MEM | BPF_W),
1595 .dst_reg = EBPF_REG_4,
1596 .src_reg = EBPF_REG_1,
1597 .off = offsetof(struct dummy_vect8, in[2].u32),
/* 32-bit div and 64-bit mod by immediate */
1600 .code = (BPF_ALU | BPF_DIV | BPF_K),
1601 .dst_reg = EBPF_REG_2,
1605 .code = (EBPF_ALU64 | BPF_MOD | BPF_K),
1606 .dst_reg = EBPF_REG_3,
/* OR in a non-zero bit so the register divisors below are non-zero */
1610 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1611 .dst_reg = EBPF_REG_2,
1615 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1616 .dst_reg = EBPF_REG_3,
/* 32-bit mod and 64-bit div by register */
1620 .code = (BPF_ALU | BPF_MOD | BPF_X),
1621 .dst_reg = EBPF_REG_4,
1622 .src_reg = EBPF_REG_2,
1625 .code = (EBPF_ALU64 | BPF_DIV | BPF_X),
1626 .dst_reg = EBPF_REG_4,
1627 .src_reg = EBPF_REG_3,
1630 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1631 .dst_reg = EBPF_REG_1,
1632 .src_reg = EBPF_REG_2,
1633 .off = offsetof(struct dummy_vect8, out[0].u64),
1636 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1637 .dst_reg = EBPF_REG_1,
1638 .src_reg = EBPF_REG_3,
1639 .off = offsetof(struct dummy_vect8, out[1].u64),
1642 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1643 .dst_reg = EBPF_REG_1,
1644 .src_reg = EBPF_REG_4,
1645 .off = offsetof(struct dummy_vect8, out[2].u64),
1647 /* check that we can handle division by zero gracefully. */
1649 .code = (BPF_LDX | BPF_MEM | BPF_W),
1650 .dst_reg = EBPF_REG_2,
1651 .src_reg = EBPF_REG_1,
1652 .off = offsetof(struct dummy_vect8, in[3].u32),
1655 .code = (BPF_ALU | BPF_DIV | BPF_X),
1656 .dst_reg = EBPF_REG_4,
1657 .src_reg = EBPF_REG_2,
1661 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1662 .dst_reg = EBPF_REG_0,
1666 .code = (BPF_JMP | EBPF_EXIT),
/* replay the div/mod sequence on the host (some OR steps elided) */
1671 test_div1_check(uint64_t rc, const void *arg)
1673 uint64_t r2, r3, r4;
1674 const struct dummy_vect8 *dvt;
1675 struct dummy_vect8 dve;
1678 memset(&dve, 0, sizeof(dve));
1680 r2 = dvt->in[0].u32;
1681 r3 = dvt->in[1].u64;
1682 r4 = dvt->in[2].u32;
1684 r2 = (uint32_t)r2 / TEST_MUL_1;
1688 r4 = (uint32_t)(r4 % r2);
1691 dve.out[0].u64 = r2;
1692 dve.out[1].u64 = r3;
1693 dve.out[2].u64 = r4;
1696 * in the test prog we attempted to divide by zero.
1697 * so return value should return 0.
1699 return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
1702 /* call test-cases */
/*
 * eBPF program for the call1 test: load the u32/u64 fields from the
 * context (R1), spill them to the stack, pass stack pointers to an
 * external function via CALL, then reload the (possibly updated) values
 * and return their sum.
 * NOTE(review): braces and some .off/.imm fields are elided in this
 * listing; comments describe only the visible encodings.
 */
1703 static const struct ebpf_insn test_call1_prog[] = {
/* R2 = *(u32 *)(R1 + offsetof(dummy_offset, u32)) */
1706 .code = (BPF_LDX | BPF_MEM | BPF_W),
1707 .dst_reg = EBPF_REG_2,
1708 .src_reg = EBPF_REG_1,
1709 .off = offsetof(struct dummy_offset, u32),
/* R3 = *(u64 *)(R1 + offsetof(dummy_offset, u64)) */
1712 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1713 .dst_reg = EBPF_REG_3,
1714 .src_reg = EBPF_REG_1,
1715 .off = offsetof(struct dummy_offset, u64),
/* spill both values into R10-relative (stack) slots */
1718 .code = (BPF_STX | BPF_MEM | BPF_W),
1719 .dst_reg = EBPF_REG_10,
1720 .src_reg = EBPF_REG_2,
1724 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1725 .dst_reg = EBPF_REG_10,
1726 .src_reg = EBPF_REG_3,
/* build pointer arguments: R2/R3 = R10 - <slot offset> */
1730 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1731 .dst_reg = EBPF_REG_2,
1732 .src_reg = EBPF_REG_10,
1735 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1736 .dst_reg = EBPF_REG_2,
1740 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1741 .dst_reg = EBPF_REG_3,
1742 .src_reg = EBPF_REG_10,
1745 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1746 .dst_reg = EBPF_REG_3,
/* call the registered external function (dummy_func1) */
1750 .code = (BPF_JMP | EBPF_CALL),
/* reload the values the callee may have updated */
1754 .code = (BPF_LDX | BPF_MEM | BPF_W),
1755 .dst_reg = EBPF_REG_2,
1756 .src_reg = EBPF_REG_10,
1760 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1761 .dst_reg = EBPF_REG_0,
1762 .src_reg = EBPF_REG_10,
/* R0 += R2, then return R0 */
1766 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1767 .dst_reg = EBPF_REG_0,
1768 .src_reg = EBPF_REG_2,
1771 .code = (BPF_JMP | EBPF_EXIT),
/*
 * External helper registered with the BPF VM for the call1 test.
 * NOTE(review): the body is elided in this listing — presumably it
 * copies the u32/u64 fields of the dummy_offset pointed to by @p into
 * *v32 and *v64 (test_call1_check calls it the same way); confirm
 * against the full file.
 */
1776 dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
1778 const struct dummy_offset *dv;
/*
 * Verify the call1 test: run the same helper the BPF program called
 * and check the returned sum matches rc.
 * NOTE(review): declarations of v32/v64, the "dv = arg;" assignment and
 * the expected-value computation are elided in this listing.
 */
1787 test_call1_check(uint64_t rc, const void *arg)
1791 const struct dummy_offset *dv;
/* recompute the values through the registered helper */
1797 dummy_func1(arg, &v32, &v64);
1801 printf("%s@%d: invalid return value "
1802 "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
1803 __func__, __LINE__, v64, rc);
/* input is not modified by the program, so compare dv against itself */
1807 return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
/*
 * External symbol table for the call1 test: exposes dummy_func1 to the
 * BPF program with three pointer arguments (struct, u32, u64) so the
 * verifier can validate the CALL instruction.
 */
1810 static const struct rte_bpf_xsym test_call1_xsym[] = {
1812 .name = RTE_STR(dummy_func1),
1813 .type = RTE_BPF_XTYPE_FUNC,
1815 .val = (void *)dummy_func1,
/* arg 1: pointer to the whole dummy_offset context */
1819 .type = RTE_BPF_ARG_PTR,
1820 .size = sizeof(struct dummy_offset),
/* arg 2: pointer to a u32 output slot */
1823 .type = RTE_BPF_ARG_PTR,
1824 .size = sizeof(uint32_t),
/* arg 3: pointer to a u64 output slot */
1827 .type = RTE_BPF_ARG_PTR,
1828 .size = sizeof(uint64_t),
1835 static const struct ebpf_insn test_call2_prog[] = {
1838 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1839 .dst_reg = EBPF_REG_1,
1840 .src_reg = EBPF_REG_10,
1843 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1844 .dst_reg = EBPF_REG_1,
1845 .imm = -(int32_t)sizeof(struct dummy_offset),
1848 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1849 .dst_reg = EBPF_REG_2,
1850 .src_reg = EBPF_REG_10,
1853 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1854 .dst_reg = EBPF_REG_2,
1855 .imm = -2 * (int32_t)sizeof(struct dummy_offset),
1858 .code = (BPF_JMP | EBPF_CALL),
1862 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1863 .dst_reg = EBPF_REG_1,
1864 .src_reg = EBPF_REG_10,
1865 .off = -(int32_t)(sizeof(struct dummy_offset) -
1866 offsetof(struct dummy_offset, u64)),
1869 .code = (BPF_LDX | BPF_MEM | BPF_W),
1870 .dst_reg = EBPF_REG_0,
1871 .src_reg = EBPF_REG_10,
1872 .off = -(int32_t)(sizeof(struct dummy_offset) -
1873 offsetof(struct dummy_offset, u32)),
1876 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1877 .dst_reg = EBPF_REG_0,
1878 .src_reg = EBPF_REG_1,
1881 .code = (BPF_LDX | BPF_MEM | BPF_H),
1882 .dst_reg = EBPF_REG_1,
1883 .src_reg = EBPF_REG_10,
1884 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1885 offsetof(struct dummy_offset, u16)),
1888 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1889 .dst_reg = EBPF_REG_0,
1890 .src_reg = EBPF_REG_1,
1893 .code = (BPF_LDX | BPF_MEM | BPF_B),
1894 .dst_reg = EBPF_REG_1,
1895 .src_reg = EBPF_REG_10,
1896 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1897 offsetof(struct dummy_offset, u8)),
1900 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1901 .dst_reg = EBPF_REG_0,
1902 .src_reg = EBPF_REG_1,
1905 .code = (BPF_JMP | EBPF_EXIT),
/*
 * External helper for the call2 test.
 * NOTE(review): the body is elided in this listing — presumably it
 * fills both dummy_offset structures with known values that
 * test_call2_check then recomputes; confirm against the full file.
 */
1911 dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
/*
 * Verify the call2 test: fill two structures through the same helper
 * the BPF program used, compute the expected field sum and compare it
 * with the program's return value.
 * NOTE(review): declaration of v and the comparison/return statements
 * are elided in this listing.
 */
1927 test_call2_check(uint64_t rc, const void *arg)
1930 struct dummy_offset a, b;
1934 dummy_func2(&a, &b);
/* expected sum mirrors the loads in test_call2_prog */
1935 v = a.u64 + a.u32 + b.u16 + b.u8;
1938 printf("%s@%d: invalid return value "
1939 "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
1940 __func__, __LINE__, v, rc);
/*
 * External symbol table for the call2 test: dummy_func2 takes two
 * pointers to dummy_offset-sized memory areas.
 */
1946 static const struct rte_bpf_xsym test_call2_xsym[] = {
1948 .name = RTE_STR(dummy_func2),
1949 .type = RTE_BPF_XTYPE_FUNC,
1951 .val = (void *)dummy_func2,
1955 .type = RTE_BPF_ARG_PTR,
1956 .size = sizeof(struct dummy_offset),
1959 .type = RTE_BPF_ARG_PTR,
1960 .size = sizeof(struct dummy_offset),
/*
 * eBPF program for the call3 test: call an external function that
 * returns a pointer (into the context), then dereference that returned
 * pointer's u8/u16/u32/u64 fields and return their sum — exercises
 * pointer-returning helpers.
 * NOTE(review): braces and some fields are elided in this listing.
 */
1967 static const struct ebpf_insn test_call3_prog[] = {
/* R0 = dummy_func3(R1) — returns a pointer */
1970 .code = (BPF_JMP | EBPF_CALL),
/* load each field through the returned pointer */
1974 .code = (BPF_LDX | BPF_MEM | BPF_B),
1975 .dst_reg = EBPF_REG_2,
1976 .src_reg = EBPF_REG_0,
1977 .off = offsetof(struct dummy_offset, u8),
1980 .code = (BPF_LDX | BPF_MEM | BPF_H),
1981 .dst_reg = EBPF_REG_3,
1982 .src_reg = EBPF_REG_0,
1983 .off = offsetof(struct dummy_offset, u16),
1986 .code = (BPF_LDX | BPF_MEM | BPF_W),
1987 .dst_reg = EBPF_REG_4,
1988 .src_reg = EBPF_REG_0,
1989 .off = offsetof(struct dummy_offset, u32),
1992 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1993 .dst_reg = EBPF_REG_0,
1994 .src_reg = EBPF_REG_0,
1995 .off = offsetof(struct dummy_offset, u64),
/* R0 = u64 + u32 + u16 + u8, then return */
1999 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2000 .dst_reg = EBPF_REG_0,
2001 .src_reg = EBPF_REG_4,
2004 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2005 .dst_reg = EBPF_REG_0,
2006 .src_reg = EBPF_REG_3,
2009 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2010 .dst_reg = EBPF_REG_0,
2011 .src_reg = EBPF_REG_2,
2014 .code = (BPF_JMP | EBPF_EXIT),
2018 static const struct dummy_offset *
2019 dummy_func3(const struct dummy_vect8 *p)
2021 return &p->in[RTE_DIM(p->in) - 1];
/*
 * Prepare input for the call3 test: zero the whole vector, then seed
 * the element dummy_func3 will return so the program reads a known
 * value.
 * NOTE(review): the "pv = arg;" assignment and remaining field
 * initializers are elided in this listing.
 */
2025 test_call3_prepare(void *arg)
2027 struct dummy_vect8 *pv;
2028 struct dummy_offset *df;
/* cast through uintptr_t deliberately strips const from the helper's
 * return so the test can seed the element it points at */
2031 df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv);
2033 memset(pv, 0, sizeof(*pv));
2034 df->u64 = (int32_t)TEST_FILL_1;
/*
 * Verify the call3 test: locate the same element via dummy_func3 and
 * compare the program's return value with the expected field sum.
 * NOTE(review): the computation of v from dft's fields is elided in
 * this listing.
 */
2041 test_call3_check(uint64_t rc, const void *arg)
2044 const struct dummy_vect8 *pv;
2045 const struct dummy_offset *dft;
2048 dft = dummy_func3(pv);
/* input is not modified by the program: compare pv against itself */
2055 return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv));
/*
 * External symbol table for the call3 test: dummy_func3 takes a pointer
 * to the whole vector and returns a pointer to one dummy_offset — the
 * return-type entry lets the verifier track the returned pointer.
 */
2058 static const struct rte_bpf_xsym test_call3_xsym[] = {
2060 .name = RTE_STR(dummy_func3),
2061 .type = RTE_BPF_XTYPE_FUNC,
2063 .val = (void *)dummy_func3,
/* argument: pointer to the full input vector */
2067 .type = RTE_BPF_ARG_PTR,
2068 .size = sizeof(struct dummy_vect8),
/* return value: pointer to a single element */
2072 .type = RTE_BPF_ARG_PTR,
2073 .size = sizeof(struct dummy_offset),
2079 /* Test for stack corruption in multiple function calls */
2079 /* Test for stack corruption in multiple function calls */
/*
 * Store four known bytes on the stack, memfrob them via one external
 * call, reload them as four separate arguments for a second call that
 * reassembles them into a word, then XOR with TEST_MEMFROB to undo the
 * frobbing.  A corrupted stack between the two calls changes the result.
 * NOTE(review): .imm/.off values of several instructions are elided.
 */
2080 static const struct ebpf_insn test_call4_prog[] = {
/* write 4 individual bytes into R10-relative stack slots */
2082 .code = (BPF_ST | BPF_MEM | BPF_B),
2083 .dst_reg = EBPF_REG_10,
2088 .code = (BPF_ST | BPF_MEM | BPF_B),
2089 .dst_reg = EBPF_REG_10,
2094 .code = (BPF_ST | BPF_MEM | BPF_B),
2095 .dst_reg = EBPF_REG_10,
2100 .code = (BPF_ST | BPF_MEM | BPF_B),
2101 .dst_reg = EBPF_REG_10,
/* R1 = R10 - R2: pointer to the 4-byte buffer for the first call */
2106 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2107 .dst_reg = EBPF_REG_1,
2108 .src_reg = EBPF_REG_10,
2111 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2112 .dst_reg = EBPF_REG_2,
2116 .code = (EBPF_ALU64 | BPF_SUB | BPF_X),
2117 .dst_reg = EBPF_REG_1,
2118 .src_reg = EBPF_REG_2,
/* first call: dummy_func4_0 (memfrob the buffer) */
2121 .code = (BPF_JMP | EBPF_CALL),
/* reload the frobbed bytes into R1..R4 as scalar arguments */
2125 .code = (BPF_LDX | BPF_MEM | BPF_B),
2126 .dst_reg = EBPF_REG_1,
2127 .src_reg = EBPF_REG_10,
2131 .code = (BPF_LDX | BPF_MEM | BPF_B),
2132 .dst_reg = EBPF_REG_2,
2133 .src_reg = EBPF_REG_10,
2137 .code = (BPF_LDX | BPF_MEM | BPF_B),
2138 .dst_reg = EBPF_REG_3,
2139 .src_reg = EBPF_REG_10,
2143 .code = (BPF_LDX | BPF_MEM | BPF_B),
2144 .dst_reg = EBPF_REG_4,
2145 .src_reg = EBPF_REG_10,
/* second call: dummy_func4_1 (gather bytes into a word) */
2149 .code = (BPF_JMP | EBPF_CALL),
/* undo the frobbing so the result matches the original bytes */
2153 .code = (EBPF_ALU64 | BPF_XOR | BPF_K),
2154 .dst_reg = EBPF_REG_0,
2155 .imm = TEST_MEMFROB,
2158 .code = (BPF_JMP | EBPF_EXIT),
/*
 * Gather four bytes into one 32-bit word, big-endian style:
 * @a becomes the most significant byte, @d the least significant.
 *
 * Each byte is cast to uint32_t before shifting: a bare uint8_t
 * promotes to (signed) int, and left-shifting a value >= 0x80 by 24
 * would shift into the sign bit — undefined behavior per C11 6.5.7.
 */
static uint32_t
dummy_func4_1(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
{
	return ((uint32_t)a << 24) | ((uint32_t)b << 16) |
		((uint32_t)c << 8) | ((uint32_t)d << 0);
}
2169 /* Implementation of memfrob */
/*
 * External helper for the call4 test: XOR-frob @n bytes of the buffer
 * at @s, byte by byte.
 * NOTE(review): the loop body and return statement are elided in this
 * listing — presumably it XORs each byte with 42 (memfrob semantics,
 * matching TEST_MEMFROB = 0x2a2a2a2a); confirm against the full file.
 */
2171 dummy_func4_0(uint32_t *s, uint8_t n)
2173 char *p = (char *) s;
/*
 * Verify the call4 test: replay the frob + gather + XOR sequence on the
 * host side and compare with the program's return value.
 */
2181 test_call4_check(uint64_t rc, const void *arg)
2183 uint8_t a[4] = {1, 2, 3, 4};
/* NOTE(review): casting uint8_t[4] to uint32_t* relies on the array
 * being suitably aligned; works here because only byte accesses are
 * presumably performed inside — confirm against dummy_func4_0's body */
2188 s = dummy_func4_0((uint32_t *)a, 4);
2190 s = dummy_func4_1(a[0], a[1], a[2], a[3]);
/* undo the frobbing, same as the XOR at the end of the BPF program */
2192 v = s ^ TEST_MEMFROB;
2194 return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
/*
 * External symbol table for the call4 test: dummy_func4_0 takes a
 * pointer to a 4-byte buffer plus a raw length and returns a raw u32;
 * dummy_func4_1 takes four raw bytes and returns a raw u32.
 */
2197 static const struct rte_bpf_xsym test_call4_xsym[] = {
2199 .name = RTE_STR(dummy_func4_0),
2200 .type = RTE_BPF_XTYPE_FUNC,
2202 .val = (void *)dummy_func4_0,
/* arg 1: pointer to the byte buffer on the BPF stack */
2206 .type = RTE_BPF_ARG_PTR,
2207 .size = 4 * sizeof(uint8_t),
/* arg 2: buffer length as a raw scalar */
2210 .type = RTE_BPF_ARG_RAW,
2211 .size = sizeof(uint8_t),
/* return value: raw 32-bit scalar */
2215 .type = RTE_BPF_ARG_RAW,
2216 .size = sizeof(uint32_t),
2221 .name = RTE_STR(dummy_func4_1),
2222 .type = RTE_BPF_XTYPE_FUNC,
2224 .val = (void *)dummy_func4_1,
/* four raw byte arguments a..d */
2228 .type = RTE_BPF_ARG_RAW,
2229 .size = sizeof(uint8_t),
2232 .type = RTE_BPF_ARG_RAW,
2233 .size = sizeof(uint8_t),
2236 .type = RTE_BPF_ARG_RAW,
2237 .size = sizeof(uint8_t),
2240 .type = RTE_BPF_ARG_RAW,
2241 .size = sizeof(uint8_t),
/* return value: raw 32-bit scalar */
2245 .type = RTE_BPF_ARG_RAW,
2246 .size = sizeof(uint32_t),
2252 /* string compare test case */
2252 /* string compare test case */
/*
 * eBPF program for the call5 test: build two short strings on the
 * stack, call an external strcmp-style helper twice (once with equal
 * strings, once with differing ones), and combine the truncated
 * results into the return value.
 * NOTE(review): many .imm/.off fields (string constants, stack offsets,
 * jump targets) are elided in this listing.
 */
2253 static const struct ebpf_insn test_call5_prog[] = {
/* materialize the first string constant into a stack slot */
2256 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2257 .dst_reg = EBPF_REG_1,
2261 .code = (BPF_STX | BPF_MEM | BPF_W),
2262 .dst_reg = EBPF_REG_10,
2263 .src_reg = EBPF_REG_1,
/* R6 = 0: NUL terminator / expected-equal marker */
2267 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2268 .dst_reg = EBPF_REG_6,
2272 .code = (BPF_STX | BPF_MEM | BPF_B),
2273 .dst_reg = EBPF_REG_10,
2274 .src_reg = EBPF_REG_6,
2278 .code = (BPF_STX | BPF_MEM | BPF_W),
2279 .dst_reg = EBPF_REG_10,
2280 .src_reg = EBPF_REG_6,
/* materialize the second string constant */
2284 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2285 .dst_reg = EBPF_REG_1,
2289 .code = (BPF_STX | BPF_MEM | BPF_W),
2290 .dst_reg = EBPF_REG_10,
2291 .src_reg = EBPF_REG_1,
/* first comparison: both pointers at the same string */
2295 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2296 .dst_reg = EBPF_REG_1,
2297 .src_reg = EBPF_REG_10,
2300 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2301 .dst_reg = EBPF_REG_1,
2305 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2306 .dst_reg = EBPF_REG_2,
2307 .src_reg = EBPF_REG_1,
2310 .code = (BPF_JMP | EBPF_CALL),
2314 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2315 .dst_reg = EBPF_REG_1,
2316 .src_reg = EBPF_REG_0,
2319 .code = (BPF_ALU | EBPF_MOV | BPF_K),
2320 .dst_reg = EBPF_REG_0,
/* truncate the helper's result via shift left then right */
2324 .code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2325 .dst_reg = EBPF_REG_1,
2329 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2330 .dst_reg = EBPF_REG_1,
/* equal strings must compare to 0, otherwise bail out */
2334 .code = (BPF_JMP | EBPF_JNE | BPF_K),
2335 .dst_reg = EBPF_REG_1,
/* second comparison: two different strings */
2340 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2341 .dst_reg = EBPF_REG_1,
2342 .src_reg = EBPF_REG_10,
2345 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2346 .dst_reg = EBPF_REG_1,
2350 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2351 .dst_reg = EBPF_REG_2,
2352 .src_reg = EBPF_REG_10,
2355 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2356 .dst_reg = EBPF_REG_2,
2360 .code = (BPF_JMP | EBPF_CALL),
2364 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2365 .dst_reg = EBPF_REG_1,
2366 .src_reg = EBPF_REG_0,
2369 .code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2370 .dst_reg = EBPF_REG_1,
2374 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2375 .dst_reg = EBPF_REG_1,
2379 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2380 .dst_reg = EBPF_REG_0,
2381 .src_reg = EBPF_REG_1,
/* differing strings must NOT compare equal to R6 (zero) */
2384 .code = (BPF_JMP | BPF_JEQ | BPF_X),
2385 .dst_reg = EBPF_REG_1,
2386 .src_reg = EBPF_REG_6,
2390 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2391 .dst_reg = EBPF_REG_0,
2395 .code = (BPF_JMP | EBPF_EXIT),
2399 /* String comparison implementation, returns 0 if equal else the difference */
/*
 * strcmp-style external helper for the call5 test.
 * NOTE(review): the loop body (s1/s2 advancement) is elided in this
 * listing; the visible condition and return match classic strcmp.
 */
2401 dummy_func5(const char *s1, const char *s2)
2403 while (*s1 && (*s1 == *s2)) {
/* compare as unsigned char so the sign of the difference is well defined */
2407 return *(const unsigned char *)s1 - *(const unsigned char *)s2;
/*
 * Verify the call5 test: run the same comparisons on the host side
 * (equal strings, then differing strings) and compare the final value
 * with the program's return code.
 * NOTE(review): declarations of a/b/v and the intermediate checks are
 * elided in this listing.
 */
2411 test_call5_check(uint64_t rc, const void *arg)
/* equal strings: expected 0 */
2419 v = dummy_func5(a, a);
/* differing strings: expected non-zero */
2425 v = dummy_func5(a, b);
2433 return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
/*
 * External symbol table for the call5 test: dummy_func5 takes two
 * string pointers and returns a raw 32-bit comparison result.
 */
2436 static const struct rte_bpf_xsym test_call5_xsym[] = {
2438 .name = RTE_STR(dummy_func5),
2439 .type = RTE_BPF_XTYPE_FUNC,
2441 .val = (void *)dummy_func5,
/* two pointer arguments (strings on the BPF stack) */
2445 .type = RTE_BPF_ARG_PTR,
2446 .size = sizeof(char),
2449 .type = RTE_BPF_ARG_PTR,
2450 .size = sizeof(char),
/* return value: raw 32-bit scalar */
2454 .type = RTE_BPF_ARG_RAW,
2455 .size = sizeof(uint32_t),
/*
 * Master table of BPF test cases.  Each entry bundles the program
 * (instructions + argument description + optional external symbols),
 * a prepare() callback that seeds the input buffer, a check_result()
 * callback that validates the outcome, and an allow_fail flag for
 * cases not expected to work on every platform (function calls on
 * 32-bit).  run_test()/test_bpf() iterate over this table.
 */
2461 static const struct bpf_test tests[] = {
2463 .name = "test_store1",
2464 .arg_sz = sizeof(struct dummy_offset),
2466 .ins = test_store1_prog,
2467 .nb_ins = RTE_DIM(test_store1_prog),
2469 .type = RTE_BPF_ARG_PTR,
2470 .size = sizeof(struct dummy_offset),
2473 .prepare = test_store1_prepare,
2474 .check_result = test_store1_check,
2477 .name = "test_store2",
2478 .arg_sz = sizeof(struct dummy_offset),
2480 .ins = test_store2_prog,
2481 .nb_ins = RTE_DIM(test_store2_prog),
2483 .type = RTE_BPF_ARG_PTR,
2484 .size = sizeof(struct dummy_offset),
/* store2 reuses store1's prepare/check: same expected memory layout */
2487 .prepare = test_store1_prepare,
2488 .check_result = test_store1_check,
2491 .name = "test_load1",
2492 .arg_sz = sizeof(struct dummy_offset),
2494 .ins = test_load1_prog,
2495 .nb_ins = RTE_DIM(test_load1_prog),
2497 .type = RTE_BPF_ARG_PTR,
2498 .size = sizeof(struct dummy_offset),
2501 .prepare = test_load1_prepare,
2502 .check_result = test_load1_check,
2505 .name = "test_ldimm1",
2506 .arg_sz = sizeof(struct dummy_offset),
2508 .ins = test_ldimm1_prog,
2509 .nb_ins = RTE_DIM(test_ldimm1_prog),
2511 .type = RTE_BPF_ARG_PTR,
2512 .size = sizeof(struct dummy_offset),
2515 .prepare = test_store1_prepare,
2516 .check_result = test_ldimm1_check,
2519 .name = "test_mul1",
2520 .arg_sz = sizeof(struct dummy_vect8),
2522 .ins = test_mul1_prog,
2523 .nb_ins = RTE_DIM(test_mul1_prog),
2525 .type = RTE_BPF_ARG_PTR,
2526 .size = sizeof(struct dummy_vect8),
2529 .prepare = test_mul1_prepare,
2530 .check_result = test_mul1_check,
2533 .name = "test_shift1",
2534 .arg_sz = sizeof(struct dummy_vect8),
2536 .ins = test_shift1_prog,
2537 .nb_ins = RTE_DIM(test_shift1_prog),
2539 .type = RTE_BPF_ARG_PTR,
2540 .size = sizeof(struct dummy_vect8),
2543 .prepare = test_shift1_prepare,
2544 .check_result = test_shift1_check,
2547 .name = "test_jump1",
2548 .arg_sz = sizeof(struct dummy_vect8),
2550 .ins = test_jump1_prog,
2551 .nb_ins = RTE_DIM(test_jump1_prog),
2553 .type = RTE_BPF_ARG_PTR,
2554 .size = sizeof(struct dummy_vect8),
2557 .prepare = test_jump1_prepare,
2558 .check_result = test_jump1_check,
2561 .name = "test_jump2",
2562 .arg_sz = sizeof(struct dummy_net),
2564 .ins = test_jump2_prog,
2565 .nb_ins = RTE_DIM(test_jump2_prog),
2567 .type = RTE_BPF_ARG_PTR,
2568 .size = sizeof(struct dummy_net),
2571 .prepare = test_jump2_prepare,
2572 .check_result = test_jump2_check,
2575 .name = "test_alu1",
2576 .arg_sz = sizeof(struct dummy_vect8),
2578 .ins = test_alu1_prog,
2579 .nb_ins = RTE_DIM(test_alu1_prog),
2581 .type = RTE_BPF_ARG_PTR,
2582 .size = sizeof(struct dummy_vect8),
2585 .prepare = test_jump1_prepare,
2586 .check_result = test_alu1_check,
2589 .name = "test_bele1",
2590 .arg_sz = sizeof(struct dummy_vect8),
2592 .ins = test_bele1_prog,
2593 .nb_ins = RTE_DIM(test_bele1_prog),
2595 .type = RTE_BPF_ARG_PTR,
2596 .size = sizeof(struct dummy_vect8),
2599 .prepare = test_bele1_prepare,
2600 .check_result = test_bele1_check,
2603 .name = "test_xadd1",
2604 .arg_sz = sizeof(struct dummy_offset),
2606 .ins = test_xadd1_prog,
2607 .nb_ins = RTE_DIM(test_xadd1_prog),
2609 .type = RTE_BPF_ARG_PTR,
2610 .size = sizeof(struct dummy_offset),
2613 .prepare = test_store1_prepare,
2614 .check_result = test_xadd1_check,
2617 .name = "test_div1",
2618 .arg_sz = sizeof(struct dummy_vect8),
2620 .ins = test_div1_prog,
2621 .nb_ins = RTE_DIM(test_div1_prog),
2623 .type = RTE_BPF_ARG_PTR,
2624 .size = sizeof(struct dummy_vect8),
2627 .prepare = test_mul1_prepare,
2628 .check_result = test_div1_check,
/* entries below exercise external function calls and set allow_fail
 * where the platform may not support them */
2631 .name = "test_call1",
2632 .arg_sz = sizeof(struct dummy_offset),
2634 .ins = test_call1_prog,
2635 .nb_ins = RTE_DIM(test_call1_prog),
2637 .type = RTE_BPF_ARG_PTR,
2638 .size = sizeof(struct dummy_offset),
2640 .xsym = test_call1_xsym,
2641 .nb_xsym = RTE_DIM(test_call1_xsym),
2643 .prepare = test_load1_prepare,
2644 .check_result = test_call1_check,
2645 /* for now don't support function calls on 32 bit platform */
2646 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
2649 .name = "test_call2",
2650 .arg_sz = sizeof(struct dummy_offset),
2652 .ins = test_call2_prog,
2653 .nb_ins = RTE_DIM(test_call2_prog),
2655 .type = RTE_BPF_ARG_PTR,
2656 .size = sizeof(struct dummy_offset),
2658 .xsym = test_call2_xsym,
2659 .nb_xsym = RTE_DIM(test_call2_xsym),
2661 .prepare = test_store1_prepare,
2662 .check_result = test_call2_check,
2663 /* for now don't support function calls on 32 bit platform */
2664 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
2667 .name = "test_call3",
2668 .arg_sz = sizeof(struct dummy_vect8),
2670 .ins = test_call3_prog,
2671 .nb_ins = RTE_DIM(test_call3_prog),
2673 .type = RTE_BPF_ARG_PTR,
2674 .size = sizeof(struct dummy_vect8),
2676 .xsym = test_call3_xsym,
2677 .nb_xsym = RTE_DIM(test_call3_xsym),
2679 .prepare = test_call3_prepare,
2680 .check_result = test_call3_check,
2681 /* for now don't support function calls on 32 bit platform */
2682 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
2685 .name = "test_call4",
2686 .arg_sz = sizeof(struct dummy_offset),
2688 .ins = test_call4_prog,
2689 .nb_ins = RTE_DIM(test_call4_prog),
2691 .type = RTE_BPF_ARG_PTR,
2692 .size = 2 * sizeof(struct dummy_offset),
2694 .xsym = test_call4_xsym,
2695 .nb_xsym = RTE_DIM(test_call4_xsym),
2697 .prepare = test_store1_prepare,
2698 .check_result = test_call4_check,
2699 /* for now don't support function calls on 32 bit platform */
2700 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
2703 .name = "test_call5",
2704 .arg_sz = sizeof(struct dummy_offset),
2706 .ins = test_call5_prog,
2707 .nb_ins = RTE_DIM(test_call5_prog),
2709 .type = RTE_BPF_ARG_PTR,
2710 .size = sizeof(struct dummy_offset),
2712 .xsym = test_call5_xsym,
2713 .nb_xsym = RTE_DIM(test_call5_xsym),
2715 .prepare = test_store1_prepare,
2716 .check_result = test_call5_check,
2717 /* for now don't support function calls on 32 bit platform */
2718 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
/*
 * Execute one table entry: load the program, run it through the
 * interpreter (rte_bpf_exec), validate the result, then repeat with
 * the JIT-compiled version when one is available.
 * NOTE(review): declarations of rc/ret/rv, the prepare() invocation and
 * the early-return paths are elided in this listing.
 */
2723 run_test(const struct bpf_test *tst)
2727 struct rte_bpf *bpf;
2728 struct rte_bpf_jit jit;
/* VLA sized per test case; holds the prepared input/context buffer */
2729 uint8_t tbuf[tst->arg_sz];
2731 printf("%s(%s) start\n", __func__, tst->name);
2733 bpf = rte_bpf_load(&tst->prm);
2735 printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
2736 __func__, __LINE__, rte_errno, strerror(rte_errno));
/* interpreter pass */
2742 rc = rte_bpf_exec(bpf, tbuf);
2743 ret = tst->check_result(rc, tbuf);
2745 printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
2746 __func__, __LINE__, tst->name, ret, strerror(ret));
/* JIT pass, only when a native function was produced */
2749 rte_bpf_get_jit(bpf, &jit);
2750 if (jit.func == NULL)
2754 rc = jit.func(tbuf);
2755 rv = tst->check_result(rc, tbuf);
/* NOTE(review): prints rv but passes strerror(ret) — looks like it
 * should be strerror(rv); confirm against upstream before fixing */
2758 printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
2759 __func__, __LINE__, tst->name, rv, strerror(ret));
2762 rte_bpf_destroy(bpf);
/* NOTE(review): fragment of the top-level test entry point (its header
 * is elided in this listing): run every table entry; a failure only
 * counts when the entry's allow_fail flag is clear. */
2774 for (i = 0; i != RTE_DIM(tests); i++) {
2775 rv = run_test(tests + i);
2776 if (tests[i].allow_fail == 0)
/* expose the suite to the DPDK test runner as the "bpf_autotest" command */
2783 REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);