1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
10 #include <rte_memory.h>
11 #include <rte_debug.h>
12 #include <rte_hexdump.h>
13 #include <rte_random.h>
14 #include <rte_byteorder.h>
15 #include <rte_errno.h>
21 * Basic functional tests for librte_bpf.
22 * The main procedure - load eBPF program, execute it and
23 * compare restuls with expected values.
/*
 * NOTE(review): this excerpt is heavily elided (struct headers, several
 * #defines and closing braces are not visible). The fragments below belong
 * to the dummy_vect8 test-data struct (8-element in/out vectors of
 * dummy_offset), the test constants, and the bpf_test descriptor struct.
 * Kept verbatim.
 */
34 struct dummy_offset in[8];
35 struct dummy_offset out[8];
/* fill/mul/shift/jcc constants used by the test programs and checkers */
38 #define TEST_FILL_1 0xDEADBEEF
41 #define TEST_MUL_2 -100
43 #define TEST_SHIFT_1 15
44 #define TEST_SHIFT_2 33
47 #define TEST_JCC_2 -123
48 #define TEST_JCC_3 5678
49 #define TEST_JCC_4 TEST_FILL_1
/*
 * bpf_test descriptor fragment: load parameters plus per-test hooks to
 * prepare the input buffer and to validate the execution result.
 */
54 struct rte_bpf_prm prm;
55 void (*prepare)(void *);
56 int (*check_result)(uint64_t, const void *);
/*
 * cmp_res() (fragment — opening/closing lines elided in this excerpt):
 * compares the program's return value and its output buffer against the
 * expected ones; on mismatch prints a diagnostic and hex-dumps both
 * buffers. Presumably returns non-zero on failure — the return
 * statements are not visible here; confirm against the full file.
 */
61 * Compare return value and result data with expected ones.
62 * Report a failure if they don't match.
65 cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
66 const void *exp_res, const void *ret_res, size_t res_sz)
71 if (exp_rc != ret_rc) {
72 printf("%s@%d: invalid return value, expected: 0x%" PRIx64
73 ",result: 0x%" PRIx64 "\n",
74 func, __LINE__, exp_rc, ret_rc);
78 if (memcmp(exp_res, ret_res, res_sz) != 0) {
79 printf("%s: invalid value\n", func);
80 rte_memdump(stdout, "expected", exp_res, res_sz);
81 rte_memdump(stdout, "result", ret_res, res_sz);
/*
 * test_store1 (fragments — per-insn braces and .imm fields elided):
 * BPF_ST stores an immediate into each field width (u8/u16/u32/u64)
 * of the dummy_offset pointed to by r1, sets r0 and exits.
 */
88 /* store immediate test-cases */
89 static const struct ebpf_insn test_store1_prog[] = {
91 .code = (BPF_ST | BPF_MEM | BPF_B),
92 .dst_reg = EBPF_REG_1,
93 .off = offsetof(struct dummy_offset, u8),
97 .code = (BPF_ST | BPF_MEM | BPF_H),
98 .dst_reg = EBPF_REG_1,
99 .off = offsetof(struct dummy_offset, u16),
103 .code = (BPF_ST | BPF_MEM | BPF_W),
104 .dst_reg = EBPF_REG_1,
105 .off = offsetof(struct dummy_offset, u32),
109 .code = (BPF_ST | BPF_MEM | EBPF_DW),
110 .dst_reg = EBPF_REG_1,
111 .off = offsetof(struct dummy_offset, u64),
/* set return value in r0 and exit */
116 .code = (BPF_ALU | EBPF_MOV | BPF_K),
117 .dst_reg = EBPF_REG_0,
121 .code = (BPF_JMP | EBPF_EXIT),
/* prepare: zero the input dummy_offset buffer before the run */
126 test_store1_prepare(void *arg)
128 struct dummy_offset *df;
131 memset(df, 0, sizeof(*df));
/*
 * check: rebuild the expected dummy_offset locally (fill value derived
 * from TEST_FILL_1) and compare it with what the program stored.
 */
135 test_store1_check(uint64_t rc, const void *arg)
137 const struct dummy_offset *dft;
138 struct dummy_offset dfe;
142 memset(&dfe, 0, sizeof(dfe));
143 dfe.u64 = (int32_t)TEST_FILL_1;
148 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
/*
 * test_store2_prog (fragment): same layout as test_store1 but uses
 * BPF_STX to store register r2 (loaded with an immediate via ALU64 MOV)
 * into each field width of the dummy_offset at r1. Reuses
 * test_store1's prepare/check (see the tests[] table).
 */
151 /* store register test-cases */
152 static const struct ebpf_insn test_store2_prog[] = {
155 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
156 .dst_reg = EBPF_REG_2,
160 .code = (BPF_STX | BPF_MEM | BPF_B),
161 .dst_reg = EBPF_REG_1,
162 .src_reg = EBPF_REG_2,
163 .off = offsetof(struct dummy_offset, u8),
166 .code = (BPF_STX | BPF_MEM | BPF_H),
167 .dst_reg = EBPF_REG_1,
168 .src_reg = EBPF_REG_2,
169 .off = offsetof(struct dummy_offset, u16),
172 .code = (BPF_STX | BPF_MEM | BPF_W),
173 .dst_reg = EBPF_REG_1,
174 .src_reg = EBPF_REG_2,
175 .off = offsetof(struct dummy_offset, u32),
178 .code = (BPF_STX | BPF_MEM | EBPF_DW),
179 .dst_reg = EBPF_REG_1,
180 .src_reg = EBPF_REG_2,
181 .off = offsetof(struct dummy_offset, u64),
/* set return value in r0 and exit */
185 .code = (BPF_ALU | EBPF_MOV | BPF_K),
186 .dst_reg = EBPF_REG_0,
190 .code = (BPF_JMP | EBPF_EXIT),
/*
 * test_load1 (fragments): loads each field width of the dummy_offset at
 * r1 into r2/r3/r4/r0, then accumulates r4+r3+r2 into r0 and exits,
 * so the return value is the sum of all four fields.
 */
194 /* load test-cases */
195 static const struct ebpf_insn test_load1_prog[] = {
198 .code = (BPF_LDX | BPF_MEM | BPF_B),
199 .dst_reg = EBPF_REG_2,
200 .src_reg = EBPF_REG_1,
201 .off = offsetof(struct dummy_offset, u8),
204 .code = (BPF_LDX | BPF_MEM | BPF_H),
205 .dst_reg = EBPF_REG_3,
206 .src_reg = EBPF_REG_1,
207 .off = offsetof(struct dummy_offset, u16),
210 .code = (BPF_LDX | BPF_MEM | BPF_W),
211 .dst_reg = EBPF_REG_4,
212 .src_reg = EBPF_REG_1,
213 .off = offsetof(struct dummy_offset, u32),
216 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
217 .dst_reg = EBPF_REG_0,
218 .src_reg = EBPF_REG_1,
219 .off = offsetof(struct dummy_offset, u64),
/* r0 = u64 + u32 + u16 + u8 */
223 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
224 .dst_reg = EBPF_REG_0,
225 .src_reg = EBPF_REG_4,
228 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
229 .dst_reg = EBPF_REG_0,
230 .src_reg = EBPF_REG_3,
233 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
234 .dst_reg = EBPF_REG_0,
235 .src_reg = EBPF_REG_2,
238 .code = (BPF_JMP | EBPF_EXIT),
/* prepare: zero the buffer, then fill fields (u64 shown; others elided) */
243 test_load1_prepare(void *arg)
245 struct dummy_offset *df;
249 memset(df, 0, sizeof(*df));
250 df->u64 = (int32_t)TEST_FILL_1;
/* check: recompute the field sum 'v' (elided here) and compare with rc */
257 test_load1_check(uint64_t rc, const void *arg)
260 const struct dummy_offset *dft;
268 return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
/*
 * test_mul1 (fragments): exercises 32-bit (BPF_ALU) and 64-bit
 * (EBPF_ALU64) multiplication by immediate and by register, storing the
 * three products into out[0..2].u64.
 */
271 /* alu mul test-cases */
272 static const struct ebpf_insn test_mul1_prog[] = {
275 .code = (BPF_LDX | BPF_MEM | BPF_W),
276 .dst_reg = EBPF_REG_2,
277 .src_reg = EBPF_REG_1,
278 .off = offsetof(struct dummy_vect8, in[0].u32),
281 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
282 .dst_reg = EBPF_REG_3,
283 .src_reg = EBPF_REG_1,
284 .off = offsetof(struct dummy_vect8, in[1].u64),
287 .code = (BPF_LDX | BPF_MEM | BPF_W),
288 .dst_reg = EBPF_REG_4,
289 .src_reg = EBPF_REG_1,
290 .off = offsetof(struct dummy_vect8, in[2].u32),
/* 32-bit and 64-bit MUL by immediate (imm fields elided in excerpt) */
293 .code = (BPF_ALU | BPF_MUL | BPF_K),
294 .dst_reg = EBPF_REG_2,
298 .code = (EBPF_ALU64 | BPF_MUL | BPF_K),
299 .dst_reg = EBPF_REG_3,
/* 32-bit and 64-bit MUL by register */
303 .code = (BPF_ALU | BPF_MUL | BPF_X),
304 .dst_reg = EBPF_REG_4,
305 .src_reg = EBPF_REG_2,
308 .code = (EBPF_ALU64 | BPF_MUL | BPF_X),
309 .dst_reg = EBPF_REG_4,
310 .src_reg = EBPF_REG_3,
/* store products for the checker */
313 .code = (BPF_STX | BPF_MEM | EBPF_DW),
314 .dst_reg = EBPF_REG_1,
315 .src_reg = EBPF_REG_2,
316 .off = offsetof(struct dummy_vect8, out[0].u64),
319 .code = (BPF_STX | BPF_MEM | EBPF_DW),
320 .dst_reg = EBPF_REG_1,
321 .src_reg = EBPF_REG_3,
322 .off = offsetof(struct dummy_vect8, out[1].u64),
325 .code = (BPF_STX | BPF_MEM | EBPF_DW),
326 .dst_reg = EBPF_REG_1,
327 .src_reg = EBPF_REG_4,
328 .off = offsetof(struct dummy_vect8, out[2].u64),
332 .code = (BPF_ALU | EBPF_MOV | BPF_K),
333 .dst_reg = EBPF_REG_0,
337 .code = (BPF_JMP | EBPF_EXIT),
/* prepare: seed the in[] vector ('v' initialization elided here) */
342 test_mul1_prepare(void *arg)
344 struct dummy_vect8 *dv;
351 memset(dv, 0, sizeof(*dv));
353 dv->in[1].u64 = v << 12 | v >> 6;
/* check: recompute the products in C and compare out[] vectors */
358 test_mul1_check(uint64_t rc, const void *arg)
361 const struct dummy_vect8 *dvt;
362 struct dummy_vect8 dve;
365 memset(&dve, 0, sizeof(dve));
371 r2 = (uint32_t)r2 * TEST_MUL_1;
373 r4 = (uint32_t)(r4 * r2);
380 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
/*
 * test_shift1 (fragments): exercises 32-bit and 64-bit shifts (LSH/RSH
 * and arithmetic ARSH) by immediate and by register, masking
 * register-supplied shift counts to the type width first; results go to
 * out[0..5].u64.
 */
383 /* alu shift test-cases */
384 static const struct ebpf_insn test_shift1_prog[] = {
387 .code = (BPF_LDX | BPF_MEM | BPF_W),
388 .dst_reg = EBPF_REG_2,
389 .src_reg = EBPF_REG_1,
390 .off = offsetof(struct dummy_vect8, in[0].u32),
393 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
394 .dst_reg = EBPF_REG_3,
395 .src_reg = EBPF_REG_1,
396 .off = offsetof(struct dummy_vect8, in[1].u64),
399 .code = (BPF_LDX | BPF_MEM | BPF_W),
400 .dst_reg = EBPF_REG_4,
401 .src_reg = EBPF_REG_1,
402 .off = offsetof(struct dummy_vect8, in[2].u32),
/* shift by immediate (imm fields elided in excerpt) */
405 .code = (BPF_ALU | BPF_LSH | BPF_K),
406 .dst_reg = EBPF_REG_2,
410 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
411 .dst_reg = EBPF_REG_3,
415 .code = (BPF_STX | BPF_MEM | EBPF_DW),
416 .dst_reg = EBPF_REG_1,
417 .src_reg = EBPF_REG_2,
418 .off = offsetof(struct dummy_vect8, out[0].u64),
421 .code = (BPF_STX | BPF_MEM | EBPF_DW),
422 .dst_reg = EBPF_REG_1,
423 .src_reg = EBPF_REG_3,
424 .off = offsetof(struct dummy_vect8, out[1].u64),
/* shift by register */
427 .code = (BPF_ALU | BPF_RSH | BPF_X),
428 .dst_reg = EBPF_REG_2,
429 .src_reg = EBPF_REG_4,
432 .code = (EBPF_ALU64 | BPF_LSH | BPF_X),
433 .dst_reg = EBPF_REG_3,
434 .src_reg = EBPF_REG_4,
437 .code = (BPF_STX | BPF_MEM | EBPF_DW),
438 .dst_reg = EBPF_REG_1,
439 .src_reg = EBPF_REG_2,
440 .off = offsetof(struct dummy_vect8, out[2].u64),
443 .code = (BPF_STX | BPF_MEM | EBPF_DW),
444 .dst_reg = EBPF_REG_1,
445 .src_reg = EBPF_REG_3,
446 .off = offsetof(struct dummy_vect8, out[3].u64),
/* reload inputs and repeat with masked shift counts */
449 .code = (BPF_LDX | BPF_MEM | BPF_W),
450 .dst_reg = EBPF_REG_2,
451 .src_reg = EBPF_REG_1,
452 .off = offsetof(struct dummy_vect8, in[0].u32),
455 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
456 .dst_reg = EBPF_REG_3,
457 .src_reg = EBPF_REG_1,
458 .off = offsetof(struct dummy_vect8, in[1].u64),
461 .code = (BPF_LDX | BPF_MEM | BPF_W),
462 .dst_reg = EBPF_REG_4,
463 .src_reg = EBPF_REG_1,
464 .off = offsetof(struct dummy_vect8, in[2].u32),
/* mask count to [0, 63] for the 64-bit arithmetic shift */
467 .code = (BPF_ALU | BPF_AND | BPF_K),
468 .dst_reg = EBPF_REG_2,
469 .imm = sizeof(uint64_t) * CHAR_BIT - 1,
472 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
473 .dst_reg = EBPF_REG_3,
474 .src_reg = EBPF_REG_2,
/* mask count to [0, 31] for the 32-bit left shift */
477 .code = (BPF_ALU | BPF_AND | BPF_K),
478 .dst_reg = EBPF_REG_2,
479 .imm = sizeof(uint32_t) * CHAR_BIT - 1,
482 .code = (BPF_ALU | BPF_LSH | BPF_X),
483 .dst_reg = EBPF_REG_4,
484 .src_reg = EBPF_REG_2,
487 .code = (BPF_STX | BPF_MEM | EBPF_DW),
488 .dst_reg = EBPF_REG_1,
489 .src_reg = EBPF_REG_4,
490 .off = offsetof(struct dummy_vect8, out[4].u64),
493 .code = (BPF_STX | BPF_MEM | EBPF_DW),
494 .dst_reg = EBPF_REG_1,
495 .src_reg = EBPF_REG_3,
496 .off = offsetof(struct dummy_vect8, out[5].u64),
500 .code = (BPF_ALU | EBPF_MOV | BPF_K),
501 .dst_reg = EBPF_REG_0,
505 .code = (BPF_JMP | EBPF_EXIT),
/* prepare: seed in[] ('v' initialization elided here) */
510 test_shift1_prepare(void *arg)
512 struct dummy_vect8 *dv;
519 memset(dv, 0, sizeof(*dv));
521 dv->in[1].u64 = v << 12 | v >> 6;
522 dv->in[2].u32 = (-v ^ 5);
/* check: recompute all six shifted values in C and compare out[] */
526 test_shift1_check(uint64_t rc, const void *arg)
529 const struct dummy_vect8 *dvt;
530 struct dummy_vect8 dve;
533 memset(&dve, 0, sizeof(dve));
539 r2 = (uint32_t)r2 << TEST_SHIFT_1;
540 r3 = (int64_t)r3 >> TEST_SHIFT_2;
545 r2 = (uint32_t)r2 >> r4;
/* same masking as the program applies before register shifts */
555 r2 &= sizeof(uint64_t) * CHAR_BIT - 1;
556 r3 = (int64_t)r3 >> r2;
557 r2 &= sizeof(uint32_t) * CHAR_BIT - 1;
558 r4 = (uint32_t)r4 << r2;
563 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
/*
 * test_jump1 (fragments): exercises conditional jumps (JEQ/JSLE/JGT/
 * JSET by immediate; JNE/JSGT/JLE/JSET by register). Each taken branch
 * lands on an OR-into-r0 block (flag values elided) followed by a JA
 * back, so r0 accumulates a bitmask of which comparisons fired.
 */
567 static const struct ebpf_insn test_jump1_prog[] = {
570 .code = (BPF_ALU | EBPF_MOV | BPF_K),
571 .dst_reg = EBPF_REG_0,
575 .code = (BPF_LDX | BPF_MEM | BPF_W),
576 .dst_reg = EBPF_REG_2,
577 .src_reg = EBPF_REG_1,
578 .off = offsetof(struct dummy_vect8, in[0].u32),
581 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
582 .dst_reg = EBPF_REG_3,
583 .src_reg = EBPF_REG_1,
584 .off = offsetof(struct dummy_vect8, in[0].u64),
587 .code = (BPF_LDX | BPF_MEM | BPF_W),
588 .dst_reg = EBPF_REG_4,
589 .src_reg = EBPF_REG_1,
590 .off = offsetof(struct dummy_vect8, in[1].u32),
593 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
594 .dst_reg = EBPF_REG_5,
595 .src_reg = EBPF_REG_1,
596 .off = offsetof(struct dummy_vect8, in[1].u64),
/* compare-with-immediate jumps (imm/off fields elided in excerpt) */
599 .code = (BPF_JMP | BPF_JEQ | BPF_K),
600 .dst_reg = EBPF_REG_2,
605 .code = (BPF_JMP | EBPF_JSLE | BPF_K),
606 .dst_reg = EBPF_REG_3,
611 .code = (BPF_JMP | BPF_JGT | BPF_K),
612 .dst_reg = EBPF_REG_4,
617 .code = (BPF_JMP | BPF_JSET | BPF_K),
618 .dst_reg = EBPF_REG_5,
/* compare-with-register jumps */
623 .code = (BPF_JMP | EBPF_JNE | BPF_X),
624 .dst_reg = EBPF_REG_2,
625 .src_reg = EBPF_REG_3,
629 .code = (BPF_JMP | EBPF_JSGT | BPF_X),
630 .dst_reg = EBPF_REG_2,
631 .src_reg = EBPF_REG_4,
635 .code = (BPF_JMP | EBPF_JLE | BPF_X),
636 .dst_reg = EBPF_REG_2,
637 .src_reg = EBPF_REG_5,
641 .code = (BPF_JMP | BPF_JSET | BPF_X),
642 .dst_reg = EBPF_REG_3,
643 .src_reg = EBPF_REG_5,
647 .code = (BPF_JMP | EBPF_EXIT),
/* branch targets: OR a flag into r0, then JA back toward the exit */
650 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
651 .dst_reg = EBPF_REG_0,
655 .code = (BPF_JMP | BPF_JA),
659 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
660 .dst_reg = EBPF_REG_0,
664 .code = (BPF_JMP | BPF_JA),
668 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
669 .dst_reg = EBPF_REG_0,
673 .code = (BPF_JMP | BPF_JA),
677 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
678 .dst_reg = EBPF_REG_0,
682 .code = (BPF_JMP | BPF_JA),
686 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
687 .dst_reg = EBPF_REG_0,
691 .code = (BPF_JMP | BPF_JA),
695 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
696 .dst_reg = EBPF_REG_0,
700 .code = (BPF_JMP | BPF_JA),
704 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
705 .dst_reg = EBPF_REG_0,
709 .code = (BPF_JMP | BPF_JA),
713 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
714 .dst_reg = EBPF_REG_0,
718 .code = (BPF_JMP | BPF_JA),
/* prepare: derive in[0]/in[1] from v1/v2 (their initialization elided) */
724 test_jump1_prepare(void *arg)
726 struct dummy_vect8 *dv;
734 memset(dv, 0, sizeof(*dv));
737 dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
738 dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
/* check: replay the comparisons in C, accumulate flags in rv, compare */
742 test_jump1_check(uint64_t rc, const void *arg)
744 uint64_t r2, r3, r4, r5, rv;
745 const struct dummy_vect8 *dvt;
755 if (r2 == TEST_JCC_1)
757 if ((int64_t)r3 <= TEST_JCC_2)
765 if ((int64_t)r2 > (int64_t)r4)
772 return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
/*
 * test_alu1 (fragments): exercises AND/OR/XOR/ADD/SUB in both 32-bit
 * and 64-bit forms, by immediate and by register, storing intermediate
 * results in out[0..7]; finishes with NEG on r2/r3 and returns
 * (-r2) + (-r3). Shares test_jump1_prepare (see tests[] table).
 */
775 /* alu (add, sub, and, or, xor, neg) test-cases */
776 static const struct ebpf_insn test_alu1_prog[] = {
779 .code = (BPF_LDX | BPF_MEM | BPF_W),
780 .dst_reg = EBPF_REG_2,
781 .src_reg = EBPF_REG_1,
782 .off = offsetof(struct dummy_vect8, in[0].u32),
785 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
786 .dst_reg = EBPF_REG_3,
787 .src_reg = EBPF_REG_1,
788 .off = offsetof(struct dummy_vect8, in[0].u64),
791 .code = (BPF_LDX | BPF_MEM | BPF_W),
792 .dst_reg = EBPF_REG_4,
793 .src_reg = EBPF_REG_1,
794 .off = offsetof(struct dummy_vect8, in[1].u32),
797 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
798 .dst_reg = EBPF_REG_5,
799 .src_reg = EBPF_REG_1,
800 .off = offsetof(struct dummy_vect8, in[1].u64),
/* ALU with immediate operand (imm fields elided in excerpt) */
803 .code = (BPF_ALU | BPF_AND | BPF_K),
804 .dst_reg = EBPF_REG_2,
808 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
809 .dst_reg = EBPF_REG_3,
813 .code = (BPF_ALU | BPF_XOR | BPF_K),
814 .dst_reg = EBPF_REG_4,
818 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
819 .dst_reg = EBPF_REG_5,
823 .code = (BPF_STX | BPF_MEM | EBPF_DW),
824 .dst_reg = EBPF_REG_1,
825 .src_reg = EBPF_REG_2,
826 .off = offsetof(struct dummy_vect8, out[0].u64),
829 .code = (BPF_STX | BPF_MEM | EBPF_DW),
830 .dst_reg = EBPF_REG_1,
831 .src_reg = EBPF_REG_3,
832 .off = offsetof(struct dummy_vect8, out[1].u64),
835 .code = (BPF_STX | BPF_MEM | EBPF_DW),
836 .dst_reg = EBPF_REG_1,
837 .src_reg = EBPF_REG_4,
838 .off = offsetof(struct dummy_vect8, out[2].u64),
841 .code = (BPF_STX | BPF_MEM | EBPF_DW),
842 .dst_reg = EBPF_REG_1,
843 .src_reg = EBPF_REG_5,
844 .off = offsetof(struct dummy_vect8, out[3].u64),
/* ALU with register operand */
847 .code = (BPF_ALU | BPF_OR | BPF_X),
848 .dst_reg = EBPF_REG_2,
849 .src_reg = EBPF_REG_3,
852 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
853 .dst_reg = EBPF_REG_3,
854 .src_reg = EBPF_REG_4,
857 .code = (BPF_ALU | BPF_SUB | BPF_X),
858 .dst_reg = EBPF_REG_4,
859 .src_reg = EBPF_REG_5,
862 .code = (EBPF_ALU64 | BPF_AND | BPF_X),
863 .dst_reg = EBPF_REG_5,
864 .src_reg = EBPF_REG_2,
867 .code = (BPF_STX | BPF_MEM | EBPF_DW),
868 .dst_reg = EBPF_REG_1,
869 .src_reg = EBPF_REG_2,
870 .off = offsetof(struct dummy_vect8, out[4].u64),
873 .code = (BPF_STX | BPF_MEM | EBPF_DW),
874 .dst_reg = EBPF_REG_1,
875 .src_reg = EBPF_REG_3,
876 .off = offsetof(struct dummy_vect8, out[5].u64),
879 .code = (BPF_STX | BPF_MEM | EBPF_DW),
880 .dst_reg = EBPF_REG_1,
881 .src_reg = EBPF_REG_4,
882 .off = offsetof(struct dummy_vect8, out[6].u64),
885 .code = (BPF_STX | BPF_MEM | EBPF_DW),
886 .dst_reg = EBPF_REG_1,
887 .src_reg = EBPF_REG_5,
888 .off = offsetof(struct dummy_vect8, out[7].u64),
890 /* return (-r2 + (-r3)) */
892 .code = (BPF_ALU | BPF_NEG),
893 .dst_reg = EBPF_REG_2,
896 .code = (EBPF_ALU64 | BPF_NEG),
897 .dst_reg = EBPF_REG_3,
900 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
901 .dst_reg = EBPF_REG_2,
902 .src_reg = EBPF_REG_3,
905 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
906 .dst_reg = EBPF_REG_0,
907 .src_reg = EBPF_REG_2,
910 .code = (BPF_JMP | EBPF_EXIT),
/* check: recompute all eight out[] values and the negated sum rv in C */
915 test_alu1_check(uint64_t rc, const void *arg)
917 uint64_t r2, r3, r4, r5, rv;
918 const struct dummy_vect8 *dvt;
919 struct dummy_vect8 dve;
922 memset(&dve, 0, sizeof(dve));
929 r2 = (uint32_t)r2 & TEST_FILL_1;
930 r3 |= (int32_t) TEST_FILL_1;
931 r4 = (uint32_t)r4 ^ TEST_FILL_1;
932 r5 += (int32_t)TEST_FILL_1;
939 r2 = (uint32_t)r2 | (uint32_t)r3;
941 r4 = (uint32_t)r4 - (uint32_t)r5;
954 return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
/*
 * test_bele1 (fragments): byte-order conversion test. Loads u16/u32/u64
 * fields, converts them with EBPF_END TO_BE and TO_LE (operand width in
 * .imm, in bits), and stores the six results into out[0..5].u64.
 */
957 /* endianness conversions (BE->LE/LE->BE) test-cases */
958 static const struct ebpf_insn test_bele1_prog[] = {
961 .code = (BPF_LDX | BPF_MEM | BPF_H),
962 .dst_reg = EBPF_REG_2,
963 .src_reg = EBPF_REG_1,
964 .off = offsetof(struct dummy_vect8, in[0].u16),
967 .code = (BPF_LDX | BPF_MEM | BPF_W),
968 .dst_reg = EBPF_REG_3,
969 .src_reg = EBPF_REG_1,
970 .off = offsetof(struct dummy_vect8, in[0].u32),
973 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
974 .dst_reg = EBPF_REG_4,
975 .src_reg = EBPF_REG_1,
976 .off = offsetof(struct dummy_vect8, in[0].u64),
/* host -> big-endian for 16/32/64-bit operands */
979 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
980 .dst_reg = EBPF_REG_2,
981 .imm = sizeof(uint16_t) * CHAR_BIT,
984 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
985 .dst_reg = EBPF_REG_3,
986 .imm = sizeof(uint32_t) * CHAR_BIT,
989 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
990 .dst_reg = EBPF_REG_4,
991 .imm = sizeof(uint64_t) * CHAR_BIT,
994 .code = (BPF_STX | BPF_MEM | EBPF_DW),
995 .dst_reg = EBPF_REG_1,
996 .src_reg = EBPF_REG_2,
997 .off = offsetof(struct dummy_vect8, out[0].u64),
1000 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1001 .dst_reg = EBPF_REG_1,
1002 .src_reg = EBPF_REG_3,
1003 .off = offsetof(struct dummy_vect8, out[1].u64),
1006 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1007 .dst_reg = EBPF_REG_1,
1008 .src_reg = EBPF_REG_4,
1009 .off = offsetof(struct dummy_vect8, out[2].u64),
/* reload the same inputs for the little-endian pass */
1012 .code = (BPF_LDX | BPF_MEM | BPF_H),
1013 .dst_reg = EBPF_REG_2,
1014 .src_reg = EBPF_REG_1,
1015 .off = offsetof(struct dummy_vect8, in[0].u16),
1018 .code = (BPF_LDX | BPF_MEM | BPF_W),
1019 .dst_reg = EBPF_REG_3,
1020 .src_reg = EBPF_REG_1,
1021 .off = offsetof(struct dummy_vect8, in[0].u32),
1024 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1025 .dst_reg = EBPF_REG_4,
1026 .src_reg = EBPF_REG_1,
1027 .off = offsetof(struct dummy_vect8, in[0].u64),
/* host -> little-endian for 16/32/64-bit operands */
1030 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1031 .dst_reg = EBPF_REG_2,
1032 .imm = sizeof(uint16_t) * CHAR_BIT,
1035 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1036 .dst_reg = EBPF_REG_3,
1037 .imm = sizeof(uint32_t) * CHAR_BIT,
1040 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1041 .dst_reg = EBPF_REG_4,
1042 .imm = sizeof(uint64_t) * CHAR_BIT,
1045 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1046 .dst_reg = EBPF_REG_1,
1047 .src_reg = EBPF_REG_2,
1048 .off = offsetof(struct dummy_vect8, out[3].u64),
1051 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1052 .dst_reg = EBPF_REG_1,
1053 .src_reg = EBPF_REG_3,
1054 .off = offsetof(struct dummy_vect8, out[4].u64),
1057 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1058 .dst_reg = EBPF_REG_1,
1059 .src_reg = EBPF_REG_4,
1060 .off = offsetof(struct dummy_vect8, out[5].u64),
1064 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1065 .dst_reg = EBPF_REG_0,
1069 .code = (BPF_JMP | EBPF_EXIT),
/* prepare: random u64, truncated copies into u32/u16 */
1074 test_bele1_prepare(void *arg)
1076 struct dummy_vect8 *dv;
1080 memset(dv, 0, sizeof(*dv));
1081 dv->in[0].u64 = rte_rand();
1082 dv->in[0].u32 = dv->in[0].u64;
1083 dv->in[0].u16 = dv->in[0].u64;
/* check: mirror the conversions with rte_cpu_to_be/le_* and compare */
1087 test_bele1_check(uint64_t rc, const void *arg)
1089 uint64_t r2, r3, r4;
1090 const struct dummy_vect8 *dvt;
1091 struct dummy_vect8 dve;
1094 memset(&dve, 0, sizeof(dve));
1096 r2 = dvt->in[0].u16;
1097 r3 = dvt->in[0].u32;
1098 r4 = dvt->in[0].u64;
1100 r2 = rte_cpu_to_be_16(r2);
1101 r3 = rte_cpu_to_be_32(r3);
1102 r4 = rte_cpu_to_be_64(r4);
1104 dve.out[0].u64 = r2;
1105 dve.out[1].u64 = r3;
1106 dve.out[2].u64 = r4;
1108 r2 = dvt->in[0].u16;
1109 r3 = dvt->in[0].u32;
1110 r4 = dvt->in[0].u64;
1112 r2 = rte_cpu_to_le_16(r2);
1113 r3 = rte_cpu_to_le_32(r3);
1114 r4 = rte_cpu_to_le_64(r4);
1116 dve.out[3].u64 = r2;
1117 dve.out[4].u64 = r3;
1118 dve.out[5].u64 = r4;
1120 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
/*
 * test_xadd1 (fragments): atomic-add test. Seven registers (r2..r8) are
 * loaded with immediates (imm fields elided) and each is atomically
 * added (EBPF_XADD) to both the u32 and u64 fields of the dummy_offset
 * at r1. Uses test_store1_prepare (zeroed buffer) per the tests[] table.
 */
1123 /* atomic add test-cases */
1124 static const struct ebpf_insn test_xadd1_prog[] = {
1127 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1128 .dst_reg = EBPF_REG_2,
1132 .code = (BPF_STX | EBPF_XADD | BPF_W),
1133 .dst_reg = EBPF_REG_1,
1134 .src_reg = EBPF_REG_2,
1135 .off = offsetof(struct dummy_offset, u32),
1138 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1139 .dst_reg = EBPF_REG_1,
1140 .src_reg = EBPF_REG_2,
1141 .off = offsetof(struct dummy_offset, u64),
1144 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1145 .dst_reg = EBPF_REG_3,
1149 .code = (BPF_STX | EBPF_XADD | BPF_W),
1150 .dst_reg = EBPF_REG_1,
1151 .src_reg = EBPF_REG_3,
1152 .off = offsetof(struct dummy_offset, u32),
1155 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1156 .dst_reg = EBPF_REG_1,
1157 .src_reg = EBPF_REG_3,
1158 .off = offsetof(struct dummy_offset, u64),
1161 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1162 .dst_reg = EBPF_REG_4,
1166 .code = (BPF_STX | EBPF_XADD | BPF_W),
1167 .dst_reg = EBPF_REG_1,
1168 .src_reg = EBPF_REG_4,
1169 .off = offsetof(struct dummy_offset, u32),
1172 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1173 .dst_reg = EBPF_REG_1,
1174 .src_reg = EBPF_REG_4,
1175 .off = offsetof(struct dummy_offset, u64),
1178 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1179 .dst_reg = EBPF_REG_5,
1183 .code = (BPF_STX | EBPF_XADD | BPF_W),
1184 .dst_reg = EBPF_REG_1,
1185 .src_reg = EBPF_REG_5,
1186 .off = offsetof(struct dummy_offset, u32),
1189 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1190 .dst_reg = EBPF_REG_1,
1191 .src_reg = EBPF_REG_5,
1192 .off = offsetof(struct dummy_offset, u64),
1195 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1196 .dst_reg = EBPF_REG_6,
1200 .code = (BPF_STX | EBPF_XADD | BPF_W),
1201 .dst_reg = EBPF_REG_1,
1202 .src_reg = EBPF_REG_6,
1203 .off = offsetof(struct dummy_offset, u32),
1206 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1207 .dst_reg = EBPF_REG_1,
1208 .src_reg = EBPF_REG_6,
1209 .off = offsetof(struct dummy_offset, u64),
1212 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1213 .dst_reg = EBPF_REG_7,
1217 .code = (BPF_STX | EBPF_XADD | BPF_W),
1218 .dst_reg = EBPF_REG_1,
1219 .src_reg = EBPF_REG_7,
1220 .off = offsetof(struct dummy_offset, u32),
1223 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1224 .dst_reg = EBPF_REG_1,
1225 .src_reg = EBPF_REG_7,
1226 .off = offsetof(struct dummy_offset, u64),
1229 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1230 .dst_reg = EBPF_REG_8,
1234 .code = (BPF_STX | EBPF_XADD | BPF_W),
1235 .dst_reg = EBPF_REG_1,
1236 .src_reg = EBPF_REG_8,
1237 .off = offsetof(struct dummy_offset, u32),
1240 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1241 .dst_reg = EBPF_REG_1,
1242 .src_reg = EBPF_REG_8,
1243 .off = offsetof(struct dummy_offset, u64),
1247 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1248 .dst_reg = EBPF_REG_0,
1252 .code = (BPF_JMP | EBPF_EXIT),
/*
 * check: replay the same additions with rte_atomic32/64_add on a local
 * dummy_offset (rv assignments between calls mostly elided) and compare.
 */
1257 test_xadd1_check(uint64_t rc, const void *arg)
1260 const struct dummy_offset *dft;
1261 struct dummy_offset dfe;
1264 memset(&dfe, 0, sizeof(dfe));
1267 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1268 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1271 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1272 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1274 rv = (int32_t)TEST_FILL_1;
1275 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1276 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1279 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1280 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1283 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1284 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1287 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1288 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1291 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1292 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1294 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
/*
 * test_div1 (fragments): exercises 32/64-bit DIV and MOD by immediate
 * and by register, ORs immediates into r2/r3 (presumably to make the
 * divisors non-zero — imm fields elided; confirm against full file),
 * stores results in out[0..2], then deliberately divides by a value
 * loaded from in[3].u32 to verify division-by-zero is handled
 * gracefully (expected rc is 0 in the checker).
 */
1297 /* alu div test-cases */
1298 static const struct ebpf_insn test_div1_prog[] = {
1301 .code = (BPF_LDX | BPF_MEM | BPF_W),
1302 .dst_reg = EBPF_REG_2,
1303 .src_reg = EBPF_REG_1,
1304 .off = offsetof(struct dummy_vect8, in[0].u32),
1307 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1308 .dst_reg = EBPF_REG_3,
1309 .src_reg = EBPF_REG_1,
1310 .off = offsetof(struct dummy_vect8, in[1].u64),
1313 .code = (BPF_LDX | BPF_MEM | BPF_W),
1314 .dst_reg = EBPF_REG_4,
1315 .src_reg = EBPF_REG_1,
1316 .off = offsetof(struct dummy_vect8, in[2].u32),
1319 .code = (BPF_ALU | BPF_DIV | BPF_K),
1320 .dst_reg = EBPF_REG_2,
1324 .code = (EBPF_ALU64 | BPF_MOD | BPF_K),
1325 .dst_reg = EBPF_REG_3,
1329 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1330 .dst_reg = EBPF_REG_2,
1334 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1335 .dst_reg = EBPF_REG_3,
1339 .code = (BPF_ALU | BPF_MOD | BPF_X),
1340 .dst_reg = EBPF_REG_4,
1341 .src_reg = EBPF_REG_2,
1344 .code = (EBPF_ALU64 | BPF_DIV | BPF_X),
1345 .dst_reg = EBPF_REG_4,
1346 .src_reg = EBPF_REG_3,
1349 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1350 .dst_reg = EBPF_REG_1,
1351 .src_reg = EBPF_REG_2,
1352 .off = offsetof(struct dummy_vect8, out[0].u64),
1355 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1356 .dst_reg = EBPF_REG_1,
1357 .src_reg = EBPF_REG_3,
1358 .off = offsetof(struct dummy_vect8, out[1].u64),
1361 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1362 .dst_reg = EBPF_REG_1,
1363 .src_reg = EBPF_REG_4,
1364 .off = offsetof(struct dummy_vect8, out[2].u64),
1366 /* check that we can handle division by zero gracefully. */
1368 .code = (BPF_LDX | BPF_MEM | BPF_W),
1369 .dst_reg = EBPF_REG_2,
1370 .src_reg = EBPF_REG_1,
1371 .off = offsetof(struct dummy_vect8, in[3].u32),
1374 .code = (BPF_ALU | BPF_DIV | BPF_X),
1375 .dst_reg = EBPF_REG_4,
1376 .src_reg = EBPF_REG_2,
1380 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1381 .dst_reg = EBPF_REG_0,
1385 .code = (BPF_JMP | EBPF_EXIT),
/* check: recompute quotients/remainders in C; expected rc is 0 */
1390 test_div1_check(uint64_t rc, const void *arg)
1392 uint64_t r2, r3, r4;
1393 const struct dummy_vect8 *dvt;
1394 struct dummy_vect8 dve;
1397 memset(&dve, 0, sizeof(dve));
1399 r2 = dvt->in[0].u32;
1400 r3 = dvt->in[1].u64;
1401 r4 = dvt->in[2].u32;
1403 r2 = (uint32_t)r2 / TEST_MUL_1;
1407 r4 = (uint32_t)(r4 % r2);
1410 dve.out[0].u64 = r2;
1411 dve.out[1].u64 = r3;
1412 dve.out[2].u64 = r4;
1415 * in the test prog we attempted to divide by zero.
1416 * so return value should return 0.
1418 return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
/*
 * test_call1 (fragments): external-call test. Copies u32/u64 fields of
 * the input onto the BPF stack (r10-relative; offsets elided), builds
 * pointers to those slots in r2/r3, invokes the external symbol
 * (EBPF_CALL — presumably dummy_func1 via test_call1_xsym; imm elided),
 * then reloads the slots and returns their sum.
 */
1421 /* call test-cases */
1422 static const struct ebpf_insn test_call1_prog[] = {
1425 .code = (BPF_LDX | BPF_MEM | BPF_W),
1426 .dst_reg = EBPF_REG_2,
1427 .src_reg = EBPF_REG_1,
1428 .off = offsetof(struct dummy_offset, u32),
1431 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1432 .dst_reg = EBPF_REG_3,
1433 .src_reg = EBPF_REG_1,
1434 .off = offsetof(struct dummy_offset, u64),
/* spill the two values to the stack (r10-relative offsets elided) */
1437 .code = (BPF_STX | BPF_MEM | BPF_W),
1438 .dst_reg = EBPF_REG_10,
1439 .src_reg = EBPF_REG_2,
1443 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1444 .dst_reg = EBPF_REG_10,
1445 .src_reg = EBPF_REG_3,
/* r2/r3 = pointers into the stack slots for the callee */
1449 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1450 .dst_reg = EBPF_REG_2,
1451 .src_reg = EBPF_REG_10,
1454 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1455 .dst_reg = EBPF_REG_2,
1459 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1460 .dst_reg = EBPF_REG_3,
1461 .src_reg = EBPF_REG_10,
1464 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1465 .dst_reg = EBPF_REG_3,
1469 .code = (BPF_JMP | EBPF_CALL),
/* reload the (callee-updated) slots and sum them into r0 */
1473 .code = (BPF_LDX | BPF_MEM | BPF_W),
1474 .dst_reg = EBPF_REG_2,
1475 .src_reg = EBPF_REG_10,
1479 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1480 .dst_reg = EBPF_REG_0,
1481 .src_reg = EBPF_REG_10,
1485 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1486 .dst_reg = EBPF_REG_0,
1487 .src_reg = EBPF_REG_2,
1490 .code = (BPF_JMP | EBPF_EXIT),
/* host helper called from BPF (body elided in this excerpt) */
1495 dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
1497 const struct dummy_offset *dv;
/* check: run dummy_func1 natively and compare expected sum with rc */
1506 test_call1_check(uint64_t rc, const void *arg)
1510 const struct dummy_offset *dv;
1516 dummy_func1(arg, &v32, &v64);
1520 printf("%s@%d: invalid return value "
1521 "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
1522 __func__, __LINE__, v64, rc);
1526 return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
/* external-symbol table exposing dummy_func1 to the verifier/loader */
1529 static const struct rte_bpf_xsym test_call1_xsym[] = {
1531 .name = RTE_STR(dummy_func1),
1532 .type = RTE_BPF_XTYPE_FUNC,
1534 .val = (void *)dummy_func1,
/* argument descriptors: struct pointer, u32 pointer, u64 pointer */
1538 .type = RTE_BPF_ARG_PTR,
1539 .size = sizeof(struct dummy_offset),
1542 .type = RTE_BPF_ARG_PTR,
1543 .size = sizeof(uint32_t),
1546 .type = RTE_BPF_ARG_PTR,
1547 .size = sizeof(uint64_t),
/*
 * test_call2 (fragments): second external-call test. Passes two
 * stack-allocated dummy_offset buffers (r1, r2 built from r10) to the
 * external call (presumably dummy_func2 via test_call2_xsym; call imm
 * elided), then sums the callee-filled u64 + u32 + u16 + u8 fields
 * loaded back from the stack into r0.
 */
1554 static const struct ebpf_insn test_call2_prog[] = {
/* r1 = stack - sizeof(struct dummy_offset) */
1557 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1558 .dst_reg = EBPF_REG_1,
1559 .src_reg = EBPF_REG_10,
1562 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1563 .dst_reg = EBPF_REG_1,
1564 .imm = -(int32_t)sizeof(struct dummy_offset),
/* r2 = stack - 2 * sizeof(struct dummy_offset) */
1567 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1568 .dst_reg = EBPF_REG_2,
1569 .src_reg = EBPF_REG_10,
1572 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1573 .dst_reg = EBPF_REG_2,
1574 .imm = -2 * (int32_t)sizeof(struct dummy_offset),
1577 .code = (BPF_JMP | EBPF_CALL),
/* accumulate fields the callee wrote into the two stack buffers */
1581 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1582 .dst_reg = EBPF_REG_1,
1583 .src_reg = EBPF_REG_10,
1584 .off = -(int32_t)(sizeof(struct dummy_offset) -
1585 offsetof(struct dummy_offset, u64)),
1588 .code = (BPF_LDX | BPF_MEM | BPF_W),
1589 .dst_reg = EBPF_REG_0,
1590 .src_reg = EBPF_REG_10,
1591 .off = -(int32_t)(sizeof(struct dummy_offset) -
1592 offsetof(struct dummy_offset, u32)),
1595 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1596 .dst_reg = EBPF_REG_0,
1597 .src_reg = EBPF_REG_1,
1600 .code = (BPF_LDX | BPF_MEM | BPF_H),
1601 .dst_reg = EBPF_REG_1,
1602 .src_reg = EBPF_REG_10,
1603 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1604 offsetof(struct dummy_offset, u16)),
1607 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1608 .dst_reg = EBPF_REG_0,
1609 .src_reg = EBPF_REG_1,
1612 .code = (BPF_LDX | BPF_MEM | BPF_B),
1613 .dst_reg = EBPF_REG_1,
1614 .src_reg = EBPF_REG_10,
1615 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1616 offsetof(struct dummy_offset, u8)),
1619 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1620 .dst_reg = EBPF_REG_0,
1621 .src_reg = EBPF_REG_1,
1624 .code = (BPF_JMP | EBPF_EXIT),
/* host helper taking two dummy_offset buffers (body elided) */
1630 dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
/* check: run dummy_func2 natively and compare expected sum with rc */
1646 test_call2_check(uint64_t rc, const void *arg)
1649 struct dummy_offset a, b;
1653 dummy_func2(&a, &b);
1654 v = a.u64 + a.u32 + b.u16 + b.u8;
1657 printf("%s@%d: invalid return value "
1658 "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
1659 __func__, __LINE__, v, rc);
/* external-symbol table exposing dummy_func2: two struct pointers */
1665 static const struct rte_bpf_xsym test_call2_xsym[] = {
1667 .name = RTE_STR(dummy_func2),
1668 .type = RTE_BPF_XTYPE_FUNC,
1670 .val = (void *)dummy_func2,
1674 .type = RTE_BPF_ARG_PTR,
1675 .size = sizeof(struct dummy_offset),
1678 .type = RTE_BPF_ARG_PTR,
1679 .size = sizeof(struct dummy_offset),
/*
 * Registry of all test-cases (fragments — nested initializer braces
 * elided): each entry names the test, sizes its argument buffer, wires
 * the instruction array and argument descriptor into rte_bpf_prm, and
 * attaches prepare/check hooks. Note deliberate hook reuse: store2 and
 * xadd1/call2 reuse test_store1's hooks, alu1 reuses
 * test_jump1_prepare, div1 reuses test_mul1_prepare, call1 reuses
 * test_load1_prepare.
 */
1686 static const struct bpf_test tests[] = {
1688 .name = "test_store1",
1689 .arg_sz = sizeof(struct dummy_offset),
1691 .ins = test_store1_prog,
1692 .nb_ins = RTE_DIM(test_store1_prog),
1694 .type = RTE_BPF_ARG_PTR,
1695 .size = sizeof(struct dummy_offset),
1698 .prepare = test_store1_prepare,
1699 .check_result = test_store1_check,
1702 .name = "test_store2",
1703 .arg_sz = sizeof(struct dummy_offset),
1705 .ins = test_store2_prog,
1706 .nb_ins = RTE_DIM(test_store2_prog),
1708 .type = RTE_BPF_ARG_PTR,
1709 .size = sizeof(struct dummy_offset),
/* store2 intentionally reuses store1's prepare/check */
1712 .prepare = test_store1_prepare,
1713 .check_result = test_store1_check,
1716 .name = "test_load1",
1717 .arg_sz = sizeof(struct dummy_offset),
1719 .ins = test_load1_prog,
1720 .nb_ins = RTE_DIM(test_load1_prog),
1722 .type = RTE_BPF_ARG_PTR,
1723 .size = sizeof(struct dummy_offset),
1726 .prepare = test_load1_prepare,
1727 .check_result = test_load1_check,
1730 .name = "test_mul1",
1731 .arg_sz = sizeof(struct dummy_vect8),
1733 .ins = test_mul1_prog,
1734 .nb_ins = RTE_DIM(test_mul1_prog),
1736 .type = RTE_BPF_ARG_PTR,
1737 .size = sizeof(struct dummy_vect8),
1740 .prepare = test_mul1_prepare,
1741 .check_result = test_mul1_check,
1744 .name = "test_shift1",
1745 .arg_sz = sizeof(struct dummy_vect8),
1747 .ins = test_shift1_prog,
1748 .nb_ins = RTE_DIM(test_shift1_prog),
1750 .type = RTE_BPF_ARG_PTR,
1751 .size = sizeof(struct dummy_vect8),
1754 .prepare = test_shift1_prepare,
1755 .check_result = test_shift1_check,
1758 .name = "test_jump1",
1759 .arg_sz = sizeof(struct dummy_vect8),
1761 .ins = test_jump1_prog,
1762 .nb_ins = RTE_DIM(test_jump1_prog),
1764 .type = RTE_BPF_ARG_PTR,
1765 .size = sizeof(struct dummy_vect8),
1768 .prepare = test_jump1_prepare,
1769 .check_result = test_jump1_check,
1772 .name = "test_alu1",
1773 .arg_sz = sizeof(struct dummy_vect8),
1775 .ins = test_alu1_prog,
1776 .nb_ins = RTE_DIM(test_alu1_prog),
1778 .type = RTE_BPF_ARG_PTR,
1779 .size = sizeof(struct dummy_vect8),
/* alu1 intentionally reuses jump1's prepare */
1782 .prepare = test_jump1_prepare,
1783 .check_result = test_alu1_check,
1786 .name = "test_bele1",
1787 .arg_sz = sizeof(struct dummy_vect8),
1789 .ins = test_bele1_prog,
1790 .nb_ins = RTE_DIM(test_bele1_prog),
1792 .type = RTE_BPF_ARG_PTR,
1793 .size = sizeof(struct dummy_vect8),
1796 .prepare = test_bele1_prepare,
1797 .check_result = test_bele1_check,
1800 .name = "test_xadd1",
1801 .arg_sz = sizeof(struct dummy_offset),
1803 .ins = test_xadd1_prog,
1804 .nb_ins = RTE_DIM(test_xadd1_prog),
1806 .type = RTE_BPF_ARG_PTR,
1807 .size = sizeof(struct dummy_offset),
1810 .prepare = test_store1_prepare,
1811 .check_result = test_xadd1_check,
1814 .name = "test_div1",
1815 .arg_sz = sizeof(struct dummy_vect8),
1817 .ins = test_div1_prog,
1818 .nb_ins = RTE_DIM(test_div1_prog),
1820 .type = RTE_BPF_ARG_PTR,
1821 .size = sizeof(struct dummy_vect8),
1824 .prepare = test_mul1_prepare,
1825 .check_result = test_div1_check,
1828 .name = "test_call1",
1829 .arg_sz = sizeof(struct dummy_offset),
1831 .ins = test_call1_prog,
1832 .nb_ins = RTE_DIM(test_call1_prog),
1834 .type = RTE_BPF_ARG_PTR,
1835 .size = sizeof(struct dummy_offset),
1837 .xsym = test_call1_xsym,
1838 .nb_xsym = RTE_DIM(test_call1_xsym),
1840 .prepare = test_load1_prepare,
1841 .check_result = test_call1_check,
1842 /* for now don't support function calls on 32 bit platform */
1843 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
1846 .name = "test_call2",
1847 .arg_sz = sizeof(struct dummy_offset),
1849 .ins = test_call2_prog,
1850 .nb_ins = RTE_DIM(test_call2_prog),
1852 .type = RTE_BPF_ARG_PTR,
1853 .size = sizeof(struct dummy_offset),
1855 .xsym = test_call2_xsym,
1856 .nb_xsym = RTE_DIM(test_call2_xsym),
1858 .prepare = test_store1_prepare,
1859 .check_result = test_call2_check,
1860 /* for now don't support function calls on 32 bit platform */
1861 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
/*
 * run_test() (fragments): loads the test's BPF program, prepares the
 * argument buffer, executes it with rte_bpf_exec and validates via the
 * test's check_result hook; then, if a JIT-compiled entry point is
 * available (rte_bpf_get_jit), runs and validates the JIT path too
 * before destroying the program. Error-accumulation/return lines are
 * elided in this excerpt.
 */
1866 run_test(const struct bpf_test *tst)
1870 struct rte_bpf *bpf;
1871 struct rte_bpf_jit jit;
1872 uint8_t tbuf[tst->arg_sz];
1874 printf("%s(%s) start\n", __func__, tst->name);
1876 bpf = rte_bpf_load(&tst->prm);
1878 printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
1879 __func__, __LINE__, rte_errno, strerror(rte_errno));
/* interpreted execution path */
1885 rc = rte_bpf_exec(bpf, tbuf);
1886 ret = tst->check_result(rc, tbuf);
1888 printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
1889 __func__, __LINE__, tst->name, ret, strerror(ret));
/* JIT execution path (skipped when no JIT is available) */
1892 rte_bpf_get_jit(bpf, &jit);
1893 if (jit.func == NULL)
1897 rc = jit.func(tbuf);
1898 rv = tst->check_result(rc, tbuf);
1901 printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
1902 __func__, __LINE__, tst->name, rv, strerror(ret));
1905 rte_bpf_destroy(bpf);
/*
 * test_bpf() (fragment): iterates over tests[]; failures only count
 * when allow_fail is clear for the entry.
 */
1917 for (i = 0; i != RTE_DIM(tests); i++) {
1918 rv = run_test(tests + i);
1919 if (tests[i].allow_fail == 0)
1926 REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);