1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
10 #include <rte_memory.h>
11 #include <rte_debug.h>
12 #include <rte_hexdump.h>
13 #include <rte_random.h>
14 #include <rte_byteorder.h>
15 #include <rte_errno.h>
21 * Basic functional tests for librte_bpf.
22 * The main procedure - load eBPF program, execute it and
23 * compare results with expected values.
34 struct dummy_offset in[8];
35 struct dummy_offset out[8];
38 #define TEST_FILL_1 0xDEADBEEF
41 #define TEST_MUL_2 -100
43 #define TEST_SHIFT_1 15
44 #define TEST_SHIFT_2 33
47 #define TEST_JCC_2 -123
48 #define TEST_JCC_3 5678
49 #define TEST_JCC_4 TEST_FILL_1
51 #define TEST_IMM_1 UINT64_MAX
52 #define TEST_IMM_2 ((uint64_t)INT64_MIN)
53 #define TEST_IMM_3 ((uint64_t)INT64_MAX + INT32_MAX)
54 #define TEST_IMM_4 ((uint64_t)UINT32_MAX)
55 #define TEST_IMM_5 ((uint64_t)UINT32_MAX + 1)
60 struct rte_bpf_prm prm;
61 void (*prepare)(void *);
62 int (*check_result)(uint64_t, const void *);
67 * Compare return value and result data with expected ones.
68 * Report a failure if they don't match.
71 cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
72 const void *exp_res, const void *ret_res, size_t res_sz)
77 if (exp_rc != ret_rc) {
78 printf("%s@%d: invalid return value, expected: 0x%" PRIx64
79 ",result: 0x%" PRIx64 "\n",
80 func, __LINE__, exp_rc, ret_rc);
84 if (memcmp(exp_res, ret_res, res_sz) != 0) {
85 printf("%s: invalid value\n", func);
86 rte_memdump(stdout, "expected", exp_res, res_sz);
87 rte_memdump(stdout, "result", ret_res, res_sz);
94 /* store immediate test-cases */
95 static const struct ebpf_insn test_store1_prog[] = {
97 .code = (BPF_ST | BPF_MEM | BPF_B),
98 .dst_reg = EBPF_REG_1,
99 .off = offsetof(struct dummy_offset, u8),
103 .code = (BPF_ST | BPF_MEM | BPF_H),
104 .dst_reg = EBPF_REG_1,
105 .off = offsetof(struct dummy_offset, u16),
109 .code = (BPF_ST | BPF_MEM | BPF_W),
110 .dst_reg = EBPF_REG_1,
111 .off = offsetof(struct dummy_offset, u32),
115 .code = (BPF_ST | BPF_MEM | EBPF_DW),
116 .dst_reg = EBPF_REG_1,
117 .off = offsetof(struct dummy_offset, u64),
122 .code = (BPF_ALU | EBPF_MOV | BPF_K),
123 .dst_reg = EBPF_REG_0,
127 .code = (BPF_JMP | EBPF_EXIT),
132 test_store1_prepare(void *arg)
134 struct dummy_offset *df;
137 memset(df, 0, sizeof(*df));
141 test_store1_check(uint64_t rc, const void *arg)
143 const struct dummy_offset *dft;
144 struct dummy_offset dfe;
148 memset(&dfe, 0, sizeof(dfe));
149 dfe.u64 = (int32_t)TEST_FILL_1;
154 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
157 /* store register test-cases */
158 static const struct ebpf_insn test_store2_prog[] = {
161 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
162 .dst_reg = EBPF_REG_2,
166 .code = (BPF_STX | BPF_MEM | BPF_B),
167 .dst_reg = EBPF_REG_1,
168 .src_reg = EBPF_REG_2,
169 .off = offsetof(struct dummy_offset, u8),
172 .code = (BPF_STX | BPF_MEM | BPF_H),
173 .dst_reg = EBPF_REG_1,
174 .src_reg = EBPF_REG_2,
175 .off = offsetof(struct dummy_offset, u16),
178 .code = (BPF_STX | BPF_MEM | BPF_W),
179 .dst_reg = EBPF_REG_1,
180 .src_reg = EBPF_REG_2,
181 .off = offsetof(struct dummy_offset, u32),
184 .code = (BPF_STX | BPF_MEM | EBPF_DW),
185 .dst_reg = EBPF_REG_1,
186 .src_reg = EBPF_REG_2,
187 .off = offsetof(struct dummy_offset, u64),
191 .code = (BPF_ALU | EBPF_MOV | BPF_K),
192 .dst_reg = EBPF_REG_0,
196 .code = (BPF_JMP | EBPF_EXIT),
200 /* load test-cases */
201 static const struct ebpf_insn test_load1_prog[] = {
204 .code = (BPF_LDX | BPF_MEM | BPF_B),
205 .dst_reg = EBPF_REG_2,
206 .src_reg = EBPF_REG_1,
207 .off = offsetof(struct dummy_offset, u8),
210 .code = (BPF_LDX | BPF_MEM | BPF_H),
211 .dst_reg = EBPF_REG_3,
212 .src_reg = EBPF_REG_1,
213 .off = offsetof(struct dummy_offset, u16),
216 .code = (BPF_LDX | BPF_MEM | BPF_W),
217 .dst_reg = EBPF_REG_4,
218 .src_reg = EBPF_REG_1,
219 .off = offsetof(struct dummy_offset, u32),
222 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
223 .dst_reg = EBPF_REG_0,
224 .src_reg = EBPF_REG_1,
225 .off = offsetof(struct dummy_offset, u64),
229 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
230 .dst_reg = EBPF_REG_0,
231 .src_reg = EBPF_REG_4,
234 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
235 .dst_reg = EBPF_REG_0,
236 .src_reg = EBPF_REG_3,
239 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
240 .dst_reg = EBPF_REG_0,
241 .src_reg = EBPF_REG_2,
244 .code = (BPF_JMP | EBPF_EXIT),
249 test_load1_prepare(void *arg)
251 struct dummy_offset *df;
255 memset(df, 0, sizeof(*df));
256 df->u64 = (int32_t)TEST_FILL_1;
263 test_load1_check(uint64_t rc, const void *arg)
266 const struct dummy_offset *dft;
274 return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
277 /* load immediate test-cases */
278 static const struct ebpf_insn test_ldimm1_prog[] = {
281 .code = (BPF_LD | BPF_IMM | EBPF_DW),
282 .dst_reg = EBPF_REG_0,
283 .imm = (uint32_t)TEST_IMM_1,
286 .imm = TEST_IMM_1 >> 32,
289 .code = (BPF_LD | BPF_IMM | EBPF_DW),
290 .dst_reg = EBPF_REG_3,
291 .imm = (uint32_t)TEST_IMM_2,
294 .imm = TEST_IMM_2 >> 32,
297 .code = (BPF_LD | BPF_IMM | EBPF_DW),
298 .dst_reg = EBPF_REG_5,
299 .imm = (uint32_t)TEST_IMM_3,
302 .imm = TEST_IMM_3 >> 32,
305 .code = (BPF_LD | BPF_IMM | EBPF_DW),
306 .dst_reg = EBPF_REG_7,
307 .imm = (uint32_t)TEST_IMM_4,
310 .imm = TEST_IMM_4 >> 32,
313 .code = (BPF_LD | BPF_IMM | EBPF_DW),
314 .dst_reg = EBPF_REG_9,
315 .imm = (uint32_t)TEST_IMM_5,
318 .imm = TEST_IMM_5 >> 32,
322 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
323 .dst_reg = EBPF_REG_0,
324 .src_reg = EBPF_REG_3,
327 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
328 .dst_reg = EBPF_REG_0,
329 .src_reg = EBPF_REG_5,
332 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
333 .dst_reg = EBPF_REG_0,
334 .src_reg = EBPF_REG_7,
337 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
338 .dst_reg = EBPF_REG_0,
339 .src_reg = EBPF_REG_9,
342 .code = (BPF_JMP | EBPF_EXIT),
347 test_ldimm1_check(uint64_t rc, const void *arg)
361 return cmp_res(__func__, v1, rc, arg, arg, 0);
365 /* alu mul test-cases */
366 static const struct ebpf_insn test_mul1_prog[] = {
369 .code = (BPF_LDX | BPF_MEM | BPF_W),
370 .dst_reg = EBPF_REG_2,
371 .src_reg = EBPF_REG_1,
372 .off = offsetof(struct dummy_vect8, in[0].u32),
375 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
376 .dst_reg = EBPF_REG_3,
377 .src_reg = EBPF_REG_1,
378 .off = offsetof(struct dummy_vect8, in[1].u64),
381 .code = (BPF_LDX | BPF_MEM | BPF_W),
382 .dst_reg = EBPF_REG_4,
383 .src_reg = EBPF_REG_1,
384 .off = offsetof(struct dummy_vect8, in[2].u32),
387 .code = (BPF_ALU | BPF_MUL | BPF_K),
388 .dst_reg = EBPF_REG_2,
392 .code = (EBPF_ALU64 | BPF_MUL | BPF_K),
393 .dst_reg = EBPF_REG_3,
397 .code = (BPF_ALU | BPF_MUL | BPF_X),
398 .dst_reg = EBPF_REG_4,
399 .src_reg = EBPF_REG_2,
402 .code = (EBPF_ALU64 | BPF_MUL | BPF_X),
403 .dst_reg = EBPF_REG_4,
404 .src_reg = EBPF_REG_3,
407 .code = (BPF_STX | BPF_MEM | EBPF_DW),
408 .dst_reg = EBPF_REG_1,
409 .src_reg = EBPF_REG_2,
410 .off = offsetof(struct dummy_vect8, out[0].u64),
413 .code = (BPF_STX | BPF_MEM | EBPF_DW),
414 .dst_reg = EBPF_REG_1,
415 .src_reg = EBPF_REG_3,
416 .off = offsetof(struct dummy_vect8, out[1].u64),
419 .code = (BPF_STX | BPF_MEM | EBPF_DW),
420 .dst_reg = EBPF_REG_1,
421 .src_reg = EBPF_REG_4,
422 .off = offsetof(struct dummy_vect8, out[2].u64),
426 .code = (BPF_ALU | EBPF_MOV | BPF_K),
427 .dst_reg = EBPF_REG_0,
431 .code = (BPF_JMP | EBPF_EXIT),
436 test_mul1_prepare(void *arg)
438 struct dummy_vect8 *dv;
445 memset(dv, 0, sizeof(*dv));
447 dv->in[1].u64 = v << 12 | v >> 6;
452 test_mul1_check(uint64_t rc, const void *arg)
455 const struct dummy_vect8 *dvt;
456 struct dummy_vect8 dve;
459 memset(&dve, 0, sizeof(dve));
465 r2 = (uint32_t)r2 * TEST_MUL_1;
467 r4 = (uint32_t)(r4 * r2);
474 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
477 /* alu shift test-cases */
478 static const struct ebpf_insn test_shift1_prog[] = {
481 .code = (BPF_LDX | BPF_MEM | BPF_W),
482 .dst_reg = EBPF_REG_2,
483 .src_reg = EBPF_REG_1,
484 .off = offsetof(struct dummy_vect8, in[0].u32),
487 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
488 .dst_reg = EBPF_REG_3,
489 .src_reg = EBPF_REG_1,
490 .off = offsetof(struct dummy_vect8, in[1].u64),
493 .code = (BPF_LDX | BPF_MEM | BPF_W),
494 .dst_reg = EBPF_REG_4,
495 .src_reg = EBPF_REG_1,
496 .off = offsetof(struct dummy_vect8, in[2].u32),
499 .code = (BPF_ALU | BPF_LSH | BPF_K),
500 .dst_reg = EBPF_REG_2,
504 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
505 .dst_reg = EBPF_REG_3,
509 .code = (BPF_STX | BPF_MEM | EBPF_DW),
510 .dst_reg = EBPF_REG_1,
511 .src_reg = EBPF_REG_2,
512 .off = offsetof(struct dummy_vect8, out[0].u64),
515 .code = (BPF_STX | BPF_MEM | EBPF_DW),
516 .dst_reg = EBPF_REG_1,
517 .src_reg = EBPF_REG_3,
518 .off = offsetof(struct dummy_vect8, out[1].u64),
521 .code = (BPF_ALU | BPF_RSH | BPF_X),
522 .dst_reg = EBPF_REG_2,
523 .src_reg = EBPF_REG_4,
526 .code = (EBPF_ALU64 | BPF_LSH | BPF_X),
527 .dst_reg = EBPF_REG_3,
528 .src_reg = EBPF_REG_4,
531 .code = (BPF_STX | BPF_MEM | EBPF_DW),
532 .dst_reg = EBPF_REG_1,
533 .src_reg = EBPF_REG_2,
534 .off = offsetof(struct dummy_vect8, out[2].u64),
537 .code = (BPF_STX | BPF_MEM | EBPF_DW),
538 .dst_reg = EBPF_REG_1,
539 .src_reg = EBPF_REG_3,
540 .off = offsetof(struct dummy_vect8, out[3].u64),
543 .code = (BPF_LDX | BPF_MEM | BPF_W),
544 .dst_reg = EBPF_REG_2,
545 .src_reg = EBPF_REG_1,
546 .off = offsetof(struct dummy_vect8, in[0].u32),
549 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
550 .dst_reg = EBPF_REG_3,
551 .src_reg = EBPF_REG_1,
552 .off = offsetof(struct dummy_vect8, in[1].u64),
555 .code = (BPF_LDX | BPF_MEM | BPF_W),
556 .dst_reg = EBPF_REG_4,
557 .src_reg = EBPF_REG_1,
558 .off = offsetof(struct dummy_vect8, in[2].u32),
561 .code = (BPF_ALU | BPF_AND | BPF_K),
562 .dst_reg = EBPF_REG_2,
563 .imm = sizeof(uint64_t) * CHAR_BIT - 1,
566 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
567 .dst_reg = EBPF_REG_3,
568 .src_reg = EBPF_REG_2,
571 .code = (BPF_ALU | BPF_AND | BPF_K),
572 .dst_reg = EBPF_REG_2,
573 .imm = sizeof(uint32_t) * CHAR_BIT - 1,
576 .code = (BPF_ALU | BPF_LSH | BPF_X),
577 .dst_reg = EBPF_REG_4,
578 .src_reg = EBPF_REG_2,
581 .code = (BPF_STX | BPF_MEM | EBPF_DW),
582 .dst_reg = EBPF_REG_1,
583 .src_reg = EBPF_REG_4,
584 .off = offsetof(struct dummy_vect8, out[4].u64),
587 .code = (BPF_STX | BPF_MEM | EBPF_DW),
588 .dst_reg = EBPF_REG_1,
589 .src_reg = EBPF_REG_3,
590 .off = offsetof(struct dummy_vect8, out[5].u64),
594 .code = (BPF_ALU | EBPF_MOV | BPF_K),
595 .dst_reg = EBPF_REG_0,
599 .code = (BPF_JMP | EBPF_EXIT),
604 test_shift1_prepare(void *arg)
606 struct dummy_vect8 *dv;
613 memset(dv, 0, sizeof(*dv));
615 dv->in[1].u64 = v << 12 | v >> 6;
616 dv->in[2].u32 = (-v ^ 5);
620 test_shift1_check(uint64_t rc, const void *arg)
623 const struct dummy_vect8 *dvt;
624 struct dummy_vect8 dve;
627 memset(&dve, 0, sizeof(dve));
633 r2 = (uint32_t)r2 << TEST_SHIFT_1;
634 r3 = (int64_t)r3 >> TEST_SHIFT_2;
639 r2 = (uint32_t)r2 >> r4;
649 r2 &= sizeof(uint64_t) * CHAR_BIT - 1;
650 r3 = (int64_t)r3 >> r2;
651 r2 &= sizeof(uint32_t) * CHAR_BIT - 1;
652 r4 = (uint32_t)r4 << r2;
657 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
661 static const struct ebpf_insn test_jump1_prog[] = {
664 .code = (BPF_ALU | EBPF_MOV | BPF_K),
665 .dst_reg = EBPF_REG_0,
669 .code = (BPF_LDX | BPF_MEM | BPF_W),
670 .dst_reg = EBPF_REG_2,
671 .src_reg = EBPF_REG_1,
672 .off = offsetof(struct dummy_vect8, in[0].u32),
675 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
676 .dst_reg = EBPF_REG_3,
677 .src_reg = EBPF_REG_1,
678 .off = offsetof(struct dummy_vect8, in[0].u64),
681 .code = (BPF_LDX | BPF_MEM | BPF_W),
682 .dst_reg = EBPF_REG_4,
683 .src_reg = EBPF_REG_1,
684 .off = offsetof(struct dummy_vect8, in[1].u32),
687 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
688 .dst_reg = EBPF_REG_5,
689 .src_reg = EBPF_REG_1,
690 .off = offsetof(struct dummy_vect8, in[1].u64),
693 .code = (BPF_JMP | BPF_JEQ | BPF_K),
694 .dst_reg = EBPF_REG_2,
699 .code = (BPF_JMP | EBPF_JSLE | BPF_K),
700 .dst_reg = EBPF_REG_3,
705 .code = (BPF_JMP | BPF_JGT | BPF_K),
706 .dst_reg = EBPF_REG_4,
711 .code = (BPF_JMP | BPF_JSET | BPF_K),
712 .dst_reg = EBPF_REG_5,
717 .code = (BPF_JMP | EBPF_JNE | BPF_X),
718 .dst_reg = EBPF_REG_2,
719 .src_reg = EBPF_REG_3,
723 .code = (BPF_JMP | EBPF_JSGT | BPF_X),
724 .dst_reg = EBPF_REG_2,
725 .src_reg = EBPF_REG_4,
729 .code = (BPF_JMP | EBPF_JLE | BPF_X),
730 .dst_reg = EBPF_REG_2,
731 .src_reg = EBPF_REG_5,
735 .code = (BPF_JMP | BPF_JSET | BPF_X),
736 .dst_reg = EBPF_REG_3,
737 .src_reg = EBPF_REG_5,
741 .code = (BPF_JMP | EBPF_EXIT),
744 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
745 .dst_reg = EBPF_REG_0,
749 .code = (BPF_JMP | BPF_JA),
753 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
754 .dst_reg = EBPF_REG_0,
758 .code = (BPF_JMP | BPF_JA),
762 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
763 .dst_reg = EBPF_REG_0,
767 .code = (BPF_JMP | BPF_JA),
771 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
772 .dst_reg = EBPF_REG_0,
776 .code = (BPF_JMP | BPF_JA),
780 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
781 .dst_reg = EBPF_REG_0,
785 .code = (BPF_JMP | BPF_JA),
789 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
790 .dst_reg = EBPF_REG_0,
794 .code = (BPF_JMP | BPF_JA),
798 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
799 .dst_reg = EBPF_REG_0,
803 .code = (BPF_JMP | BPF_JA),
807 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
808 .dst_reg = EBPF_REG_0,
812 .code = (BPF_JMP | BPF_JA),
818 test_jump1_prepare(void *arg)
820 struct dummy_vect8 *dv;
828 memset(dv, 0, sizeof(*dv));
831 dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
832 dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
836 test_jump1_check(uint64_t rc, const void *arg)
838 uint64_t r2, r3, r4, r5, rv;
839 const struct dummy_vect8 *dvt;
849 if (r2 == TEST_JCC_1)
851 if ((int64_t)r3 <= TEST_JCC_2)
859 if ((int64_t)r2 > (int64_t)r4)
866 return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
869 /* alu (add, sub, and, or, xor, neg) test-cases */
870 static const struct ebpf_insn test_alu1_prog[] = {
873 .code = (BPF_LDX | BPF_MEM | BPF_W),
874 .dst_reg = EBPF_REG_2,
875 .src_reg = EBPF_REG_1,
876 .off = offsetof(struct dummy_vect8, in[0].u32),
879 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
880 .dst_reg = EBPF_REG_3,
881 .src_reg = EBPF_REG_1,
882 .off = offsetof(struct dummy_vect8, in[0].u64),
885 .code = (BPF_LDX | BPF_MEM | BPF_W),
886 .dst_reg = EBPF_REG_4,
887 .src_reg = EBPF_REG_1,
888 .off = offsetof(struct dummy_vect8, in[1].u32),
891 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
892 .dst_reg = EBPF_REG_5,
893 .src_reg = EBPF_REG_1,
894 .off = offsetof(struct dummy_vect8, in[1].u64),
897 .code = (BPF_ALU | BPF_AND | BPF_K),
898 .dst_reg = EBPF_REG_2,
902 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
903 .dst_reg = EBPF_REG_3,
907 .code = (BPF_ALU | BPF_XOR | BPF_K),
908 .dst_reg = EBPF_REG_4,
912 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
913 .dst_reg = EBPF_REG_5,
917 .code = (BPF_STX | BPF_MEM | EBPF_DW),
918 .dst_reg = EBPF_REG_1,
919 .src_reg = EBPF_REG_2,
920 .off = offsetof(struct dummy_vect8, out[0].u64),
923 .code = (BPF_STX | BPF_MEM | EBPF_DW),
924 .dst_reg = EBPF_REG_1,
925 .src_reg = EBPF_REG_3,
926 .off = offsetof(struct dummy_vect8, out[1].u64),
929 .code = (BPF_STX | BPF_MEM | EBPF_DW),
930 .dst_reg = EBPF_REG_1,
931 .src_reg = EBPF_REG_4,
932 .off = offsetof(struct dummy_vect8, out[2].u64),
935 .code = (BPF_STX | BPF_MEM | EBPF_DW),
936 .dst_reg = EBPF_REG_1,
937 .src_reg = EBPF_REG_5,
938 .off = offsetof(struct dummy_vect8, out[3].u64),
941 .code = (BPF_ALU | BPF_OR | BPF_X),
942 .dst_reg = EBPF_REG_2,
943 .src_reg = EBPF_REG_3,
946 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
947 .dst_reg = EBPF_REG_3,
948 .src_reg = EBPF_REG_4,
951 .code = (BPF_ALU | BPF_SUB | BPF_X),
952 .dst_reg = EBPF_REG_4,
953 .src_reg = EBPF_REG_5,
956 .code = (EBPF_ALU64 | BPF_AND | BPF_X),
957 .dst_reg = EBPF_REG_5,
958 .src_reg = EBPF_REG_2,
961 .code = (BPF_STX | BPF_MEM | EBPF_DW),
962 .dst_reg = EBPF_REG_1,
963 .src_reg = EBPF_REG_2,
964 .off = offsetof(struct dummy_vect8, out[4].u64),
967 .code = (BPF_STX | BPF_MEM | EBPF_DW),
968 .dst_reg = EBPF_REG_1,
969 .src_reg = EBPF_REG_3,
970 .off = offsetof(struct dummy_vect8, out[5].u64),
973 .code = (BPF_STX | BPF_MEM | EBPF_DW),
974 .dst_reg = EBPF_REG_1,
975 .src_reg = EBPF_REG_4,
976 .off = offsetof(struct dummy_vect8, out[6].u64),
979 .code = (BPF_STX | BPF_MEM | EBPF_DW),
980 .dst_reg = EBPF_REG_1,
981 .src_reg = EBPF_REG_5,
982 .off = offsetof(struct dummy_vect8, out[7].u64),
984 /* return (-r2 + (-r3)) */
986 .code = (BPF_ALU | BPF_NEG),
987 .dst_reg = EBPF_REG_2,
990 .code = (EBPF_ALU64 | BPF_NEG),
991 .dst_reg = EBPF_REG_3,
994 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
995 .dst_reg = EBPF_REG_2,
996 .src_reg = EBPF_REG_3,
999 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1000 .dst_reg = EBPF_REG_0,
1001 .src_reg = EBPF_REG_2,
1004 .code = (BPF_JMP | EBPF_EXIT),
1009 test_alu1_check(uint64_t rc, const void *arg)
1011 uint64_t r2, r3, r4, r5, rv;
1012 const struct dummy_vect8 *dvt;
1013 struct dummy_vect8 dve;
1016 memset(&dve, 0, sizeof(dve));
1018 r2 = dvt->in[0].u32;
1019 r3 = dvt->in[0].u64;
1020 r4 = dvt->in[1].u32;
1021 r5 = dvt->in[1].u64;
1023 r2 = (uint32_t)r2 & TEST_FILL_1;
1024 r3 |= (int32_t) TEST_FILL_1;
1025 r4 = (uint32_t)r4 ^ TEST_FILL_1;
1026 r5 += (int32_t)TEST_FILL_1;
1028 dve.out[0].u64 = r2;
1029 dve.out[1].u64 = r3;
1030 dve.out[2].u64 = r4;
1031 dve.out[3].u64 = r5;
1033 r2 = (uint32_t)r2 | (uint32_t)r3;
1035 r4 = (uint32_t)r4 - (uint32_t)r5;
1038 dve.out[4].u64 = r2;
1039 dve.out[5].u64 = r3;
1040 dve.out[6].u64 = r4;
1041 dve.out[7].u64 = r5;
1048 return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
1051 /* endianness conversions (BE->LE/LE->BE) test-cases */
1052 static const struct ebpf_insn test_bele1_prog[] = {
1055 .code = (BPF_LDX | BPF_MEM | BPF_H),
1056 .dst_reg = EBPF_REG_2,
1057 .src_reg = EBPF_REG_1,
1058 .off = offsetof(struct dummy_vect8, in[0].u16),
1061 .code = (BPF_LDX | BPF_MEM | BPF_W),
1062 .dst_reg = EBPF_REG_3,
1063 .src_reg = EBPF_REG_1,
1064 .off = offsetof(struct dummy_vect8, in[0].u32),
1067 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1068 .dst_reg = EBPF_REG_4,
1069 .src_reg = EBPF_REG_1,
1070 .off = offsetof(struct dummy_vect8, in[0].u64),
1073 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1074 .dst_reg = EBPF_REG_2,
1075 .imm = sizeof(uint16_t) * CHAR_BIT,
1078 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1079 .dst_reg = EBPF_REG_3,
1080 .imm = sizeof(uint32_t) * CHAR_BIT,
1083 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1084 .dst_reg = EBPF_REG_4,
1085 .imm = sizeof(uint64_t) * CHAR_BIT,
1088 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1089 .dst_reg = EBPF_REG_1,
1090 .src_reg = EBPF_REG_2,
1091 .off = offsetof(struct dummy_vect8, out[0].u64),
1094 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1095 .dst_reg = EBPF_REG_1,
1096 .src_reg = EBPF_REG_3,
1097 .off = offsetof(struct dummy_vect8, out[1].u64),
1100 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1101 .dst_reg = EBPF_REG_1,
1102 .src_reg = EBPF_REG_4,
1103 .off = offsetof(struct dummy_vect8, out[2].u64),
1106 .code = (BPF_LDX | BPF_MEM | BPF_H),
1107 .dst_reg = EBPF_REG_2,
1108 .src_reg = EBPF_REG_1,
1109 .off = offsetof(struct dummy_vect8, in[0].u16),
1112 .code = (BPF_LDX | BPF_MEM | BPF_W),
1113 .dst_reg = EBPF_REG_3,
1114 .src_reg = EBPF_REG_1,
1115 .off = offsetof(struct dummy_vect8, in[0].u32),
1118 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1119 .dst_reg = EBPF_REG_4,
1120 .src_reg = EBPF_REG_1,
1121 .off = offsetof(struct dummy_vect8, in[0].u64),
1124 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1125 .dst_reg = EBPF_REG_2,
1126 .imm = sizeof(uint16_t) * CHAR_BIT,
1129 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1130 .dst_reg = EBPF_REG_3,
1131 .imm = sizeof(uint32_t) * CHAR_BIT,
1134 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1135 .dst_reg = EBPF_REG_4,
1136 .imm = sizeof(uint64_t) * CHAR_BIT,
1139 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1140 .dst_reg = EBPF_REG_1,
1141 .src_reg = EBPF_REG_2,
1142 .off = offsetof(struct dummy_vect8, out[3].u64),
1145 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1146 .dst_reg = EBPF_REG_1,
1147 .src_reg = EBPF_REG_3,
1148 .off = offsetof(struct dummy_vect8, out[4].u64),
1151 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1152 .dst_reg = EBPF_REG_1,
1153 .src_reg = EBPF_REG_4,
1154 .off = offsetof(struct dummy_vect8, out[5].u64),
1158 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1159 .dst_reg = EBPF_REG_0,
1163 .code = (BPF_JMP | EBPF_EXIT),
1168 test_bele1_prepare(void *arg)
1170 struct dummy_vect8 *dv;
1174 memset(dv, 0, sizeof(*dv));
1175 dv->in[0].u64 = rte_rand();
1176 dv->in[0].u32 = dv->in[0].u64;
1177 dv->in[0].u16 = dv->in[0].u64;
1181 test_bele1_check(uint64_t rc, const void *arg)
1183 uint64_t r2, r3, r4;
1184 const struct dummy_vect8 *dvt;
1185 struct dummy_vect8 dve;
1188 memset(&dve, 0, sizeof(dve));
1190 r2 = dvt->in[0].u16;
1191 r3 = dvt->in[0].u32;
1192 r4 = dvt->in[0].u64;
1194 r2 = rte_cpu_to_be_16(r2);
1195 r3 = rte_cpu_to_be_32(r3);
1196 r4 = rte_cpu_to_be_64(r4);
1198 dve.out[0].u64 = r2;
1199 dve.out[1].u64 = r3;
1200 dve.out[2].u64 = r4;
1202 r2 = dvt->in[0].u16;
1203 r3 = dvt->in[0].u32;
1204 r4 = dvt->in[0].u64;
1206 r2 = rte_cpu_to_le_16(r2);
1207 r3 = rte_cpu_to_le_32(r3);
1208 r4 = rte_cpu_to_le_64(r4);
1210 dve.out[3].u64 = r2;
1211 dve.out[4].u64 = r3;
1212 dve.out[5].u64 = r4;
1214 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
1217 /* atomic add test-cases */
1218 static const struct ebpf_insn test_xadd1_prog[] = {
1221 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1222 .dst_reg = EBPF_REG_2,
1226 .code = (BPF_STX | EBPF_XADD | BPF_W),
1227 .dst_reg = EBPF_REG_1,
1228 .src_reg = EBPF_REG_2,
1229 .off = offsetof(struct dummy_offset, u32),
1232 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1233 .dst_reg = EBPF_REG_1,
1234 .src_reg = EBPF_REG_2,
1235 .off = offsetof(struct dummy_offset, u64),
1238 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1239 .dst_reg = EBPF_REG_3,
1243 .code = (BPF_STX | EBPF_XADD | BPF_W),
1244 .dst_reg = EBPF_REG_1,
1245 .src_reg = EBPF_REG_3,
1246 .off = offsetof(struct dummy_offset, u32),
1249 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1250 .dst_reg = EBPF_REG_1,
1251 .src_reg = EBPF_REG_3,
1252 .off = offsetof(struct dummy_offset, u64),
1255 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1256 .dst_reg = EBPF_REG_4,
1260 .code = (BPF_STX | EBPF_XADD | BPF_W),
1261 .dst_reg = EBPF_REG_1,
1262 .src_reg = EBPF_REG_4,
1263 .off = offsetof(struct dummy_offset, u32),
1266 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1267 .dst_reg = EBPF_REG_1,
1268 .src_reg = EBPF_REG_4,
1269 .off = offsetof(struct dummy_offset, u64),
1272 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1273 .dst_reg = EBPF_REG_5,
1277 .code = (BPF_STX | EBPF_XADD | BPF_W),
1278 .dst_reg = EBPF_REG_1,
1279 .src_reg = EBPF_REG_5,
1280 .off = offsetof(struct dummy_offset, u32),
1283 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1284 .dst_reg = EBPF_REG_1,
1285 .src_reg = EBPF_REG_5,
1286 .off = offsetof(struct dummy_offset, u64),
1289 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1290 .dst_reg = EBPF_REG_6,
1294 .code = (BPF_STX | EBPF_XADD | BPF_W),
1295 .dst_reg = EBPF_REG_1,
1296 .src_reg = EBPF_REG_6,
1297 .off = offsetof(struct dummy_offset, u32),
1300 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1301 .dst_reg = EBPF_REG_1,
1302 .src_reg = EBPF_REG_6,
1303 .off = offsetof(struct dummy_offset, u64),
1306 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1307 .dst_reg = EBPF_REG_7,
1311 .code = (BPF_STX | EBPF_XADD | BPF_W),
1312 .dst_reg = EBPF_REG_1,
1313 .src_reg = EBPF_REG_7,
1314 .off = offsetof(struct dummy_offset, u32),
1317 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1318 .dst_reg = EBPF_REG_1,
1319 .src_reg = EBPF_REG_7,
1320 .off = offsetof(struct dummy_offset, u64),
1323 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1324 .dst_reg = EBPF_REG_8,
1328 .code = (BPF_STX | EBPF_XADD | BPF_W),
1329 .dst_reg = EBPF_REG_1,
1330 .src_reg = EBPF_REG_8,
1331 .off = offsetof(struct dummy_offset, u32),
1334 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1335 .dst_reg = EBPF_REG_1,
1336 .src_reg = EBPF_REG_8,
1337 .off = offsetof(struct dummy_offset, u64),
1341 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1342 .dst_reg = EBPF_REG_0,
1346 .code = (BPF_JMP | EBPF_EXIT),
1351 test_xadd1_check(uint64_t rc, const void *arg)
1354 const struct dummy_offset *dft;
1355 struct dummy_offset dfe;
1358 memset(&dfe, 0, sizeof(dfe));
1361 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1362 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1365 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1366 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1368 rv = (int32_t)TEST_FILL_1;
1369 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1370 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1373 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1374 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1377 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1378 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1381 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1382 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1385 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1386 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1388 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
1391 /* alu div test-cases */
1392 static const struct ebpf_insn test_div1_prog[] = {
1395 .code = (BPF_LDX | BPF_MEM | BPF_W),
1396 .dst_reg = EBPF_REG_2,
1397 .src_reg = EBPF_REG_1,
1398 .off = offsetof(struct dummy_vect8, in[0].u32),
1401 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1402 .dst_reg = EBPF_REG_3,
1403 .src_reg = EBPF_REG_1,
1404 .off = offsetof(struct dummy_vect8, in[1].u64),
1407 .code = (BPF_LDX | BPF_MEM | BPF_W),
1408 .dst_reg = EBPF_REG_4,
1409 .src_reg = EBPF_REG_1,
1410 .off = offsetof(struct dummy_vect8, in[2].u32),
1413 .code = (BPF_ALU | BPF_DIV | BPF_K),
1414 .dst_reg = EBPF_REG_2,
1418 .code = (EBPF_ALU64 | BPF_MOD | BPF_K),
1419 .dst_reg = EBPF_REG_3,
1423 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1424 .dst_reg = EBPF_REG_2,
1428 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1429 .dst_reg = EBPF_REG_3,
1433 .code = (BPF_ALU | BPF_MOD | BPF_X),
1434 .dst_reg = EBPF_REG_4,
1435 .src_reg = EBPF_REG_2,
1438 .code = (EBPF_ALU64 | BPF_DIV | BPF_X),
1439 .dst_reg = EBPF_REG_4,
1440 .src_reg = EBPF_REG_3,
1443 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1444 .dst_reg = EBPF_REG_1,
1445 .src_reg = EBPF_REG_2,
1446 .off = offsetof(struct dummy_vect8, out[0].u64),
1449 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1450 .dst_reg = EBPF_REG_1,
1451 .src_reg = EBPF_REG_3,
1452 .off = offsetof(struct dummy_vect8, out[1].u64),
1455 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1456 .dst_reg = EBPF_REG_1,
1457 .src_reg = EBPF_REG_4,
1458 .off = offsetof(struct dummy_vect8, out[2].u64),
1460 /* check that we can handle division by zero gracefully. */
1462 .code = (BPF_LDX | BPF_MEM | BPF_W),
1463 .dst_reg = EBPF_REG_2,
1464 .src_reg = EBPF_REG_1,
1465 .off = offsetof(struct dummy_vect8, in[3].u32),
1468 .code = (BPF_ALU | BPF_DIV | BPF_X),
1469 .dst_reg = EBPF_REG_4,
1470 .src_reg = EBPF_REG_2,
1474 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1475 .dst_reg = EBPF_REG_0,
1479 .code = (BPF_JMP | EBPF_EXIT),
1484 test_div1_check(uint64_t rc, const void *arg)
1486 uint64_t r2, r3, r4;
1487 const struct dummy_vect8 *dvt;
1488 struct dummy_vect8 dve;
1491 memset(&dve, 0, sizeof(dve));
1493 r2 = dvt->in[0].u32;
1494 r3 = dvt->in[1].u64;
1495 r4 = dvt->in[2].u32;
1497 r2 = (uint32_t)r2 / TEST_MUL_1;
1501 r4 = (uint32_t)(r4 % r2);
1504 dve.out[0].u64 = r2;
1505 dve.out[1].u64 = r3;
1506 dve.out[2].u64 = r4;
1509 * in the test prog we attempted to divide by zero.
1510 * so the return value should be 0.
1512 return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
1515 /* call test-cases */
1516 static const struct ebpf_insn test_call1_prog[] = {
1519 .code = (BPF_LDX | BPF_MEM | BPF_W),
1520 .dst_reg = EBPF_REG_2,
1521 .src_reg = EBPF_REG_1,
1522 .off = offsetof(struct dummy_offset, u32),
1525 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1526 .dst_reg = EBPF_REG_3,
1527 .src_reg = EBPF_REG_1,
1528 .off = offsetof(struct dummy_offset, u64),
1531 .code = (BPF_STX | BPF_MEM | BPF_W),
1532 .dst_reg = EBPF_REG_10,
1533 .src_reg = EBPF_REG_2,
1537 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1538 .dst_reg = EBPF_REG_10,
1539 .src_reg = EBPF_REG_3,
1543 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1544 .dst_reg = EBPF_REG_2,
1545 .src_reg = EBPF_REG_10,
1548 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1549 .dst_reg = EBPF_REG_2,
1553 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1554 .dst_reg = EBPF_REG_3,
1555 .src_reg = EBPF_REG_10,
1558 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1559 .dst_reg = EBPF_REG_3,
1563 .code = (BPF_JMP | EBPF_CALL),
1567 .code = (BPF_LDX | BPF_MEM | BPF_W),
1568 .dst_reg = EBPF_REG_2,
1569 .src_reg = EBPF_REG_10,
1573 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1574 .dst_reg = EBPF_REG_0,
1575 .src_reg = EBPF_REG_10,
1579 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1580 .dst_reg = EBPF_REG_0,
1581 .src_reg = EBPF_REG_2,
1584 .code = (BPF_JMP | EBPF_EXIT),
1589 dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
1591 const struct dummy_offset *dv;
1600 test_call1_check(uint64_t rc, const void *arg)
1604 const struct dummy_offset *dv;
1610 dummy_func1(arg, &v32, &v64);
1614 printf("%s@%d: invalid return value "
1615 "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
1616 __func__, __LINE__, v64, rc);
1620 return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
1623 static const struct rte_bpf_xsym test_call1_xsym[] = {
1625 .name = RTE_STR(dummy_func1),
1626 .type = RTE_BPF_XTYPE_FUNC,
1628 .val = (void *)dummy_func1,
1632 .type = RTE_BPF_ARG_PTR,
1633 .size = sizeof(struct dummy_offset),
1636 .type = RTE_BPF_ARG_PTR,
1637 .size = sizeof(uint32_t),
1640 .type = RTE_BPF_ARG_PTR,
1641 .size = sizeof(uint64_t),
1648 static const struct ebpf_insn test_call2_prog[] = {
1651 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1652 .dst_reg = EBPF_REG_1,
1653 .src_reg = EBPF_REG_10,
1656 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1657 .dst_reg = EBPF_REG_1,
1658 .imm = -(int32_t)sizeof(struct dummy_offset),
1661 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1662 .dst_reg = EBPF_REG_2,
1663 .src_reg = EBPF_REG_10,
1666 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1667 .dst_reg = EBPF_REG_2,
1668 .imm = -2 * (int32_t)sizeof(struct dummy_offset),
1671 .code = (BPF_JMP | EBPF_CALL),
1675 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1676 .dst_reg = EBPF_REG_1,
1677 .src_reg = EBPF_REG_10,
1678 .off = -(int32_t)(sizeof(struct dummy_offset) -
1679 offsetof(struct dummy_offset, u64)),
1682 .code = (BPF_LDX | BPF_MEM | BPF_W),
1683 .dst_reg = EBPF_REG_0,
1684 .src_reg = EBPF_REG_10,
1685 .off = -(int32_t)(sizeof(struct dummy_offset) -
1686 offsetof(struct dummy_offset, u32)),
1689 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1690 .dst_reg = EBPF_REG_0,
1691 .src_reg = EBPF_REG_1,
1694 .code = (BPF_LDX | BPF_MEM | BPF_H),
1695 .dst_reg = EBPF_REG_1,
1696 .src_reg = EBPF_REG_10,
1697 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1698 offsetof(struct dummy_offset, u16)),
1701 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1702 .dst_reg = EBPF_REG_0,
1703 .src_reg = EBPF_REG_1,
1706 .code = (BPF_LDX | BPF_MEM | BPF_B),
1707 .dst_reg = EBPF_REG_1,
1708 .src_reg = EBPF_REG_10,
1709 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1710 offsetof(struct dummy_offset, u8)),
1713 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1714 .dst_reg = EBPF_REG_0,
1715 .src_reg = EBPF_REG_1,
1718 .code = (BPF_JMP | EBPF_EXIT),
1724 dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
1740 test_call2_check(uint64_t rc, const void *arg)
1743 struct dummy_offset a, b;
1747 dummy_func2(&a, &b);
1748 v = a.u64 + a.u32 + b.u16 + b.u8;
1751 printf("%s@%d: invalid return value "
1752 "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
1753 __func__, __LINE__, v, rc);
1759 static const struct rte_bpf_xsym test_call2_xsym[] = {
1761 .name = RTE_STR(dummy_func2),
1762 .type = RTE_BPF_XTYPE_FUNC,
1764 .val = (void *)dummy_func2,
1768 .type = RTE_BPF_ARG_PTR,
1769 .size = sizeof(struct dummy_offset),
1772 .type = RTE_BPF_ARG_PTR,
1773 .size = sizeof(struct dummy_offset),
/*
 * Master table of BPF functional test cases, iterated by test_bpf().
 * Each entry names a compiled eBPF instruction sequence, the size of the
 * argument buffer passed to it, a prepare() callback that initialises the
 * buffer, and a check_result() callback that validates the outcome.
 * NOTE(review): per-entry braces and the .prm sub-struct nesting are
 * elided in this chunk; only the initialiser lines are visible.
 */
static const struct bpf_test tests[] = {
	/* immediate stores of 1/2/4/8-byte values */
	.name = "test_store1",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_store1_prog,
	.nb_ins = RTE_DIM(test_store1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.prepare = test_store1_prepare,
	.check_result = test_store1_check,
	/* register-sourced stores; reuses store1 prepare/check */
	.name = "test_store2",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_store2_prog,
	.nb_ins = RTE_DIM(test_store2_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.prepare = test_store1_prepare,
	.check_result = test_store1_check,
	/* memory loads of all widths */
	.name = "test_load1",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_load1_prog,
	.nb_ins = RTE_DIM(test_load1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.prepare = test_load1_prepare,
	.check_result = test_load1_check,
	/* 64-bit load-immediate (EBPF_LD | BPF_IMM | EBPF_DW) */
	.name = "test_ldimm1",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_ldimm1_prog,
	.nb_ins = RTE_DIM(test_ldimm1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.prepare = test_store1_prepare,
	.check_result = test_ldimm1_check,
	/* 32/64-bit multiply operations */
	.name = "test_mul1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_mul1_prog,
	.nb_ins = RTE_DIM(test_mul1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_mul1_prepare,
	.check_result = test_mul1_check,
	/* shift operations (LSH/RSH/ARSH) */
	.name = "test_shift1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_shift1_prog,
	.nb_ins = RTE_DIM(test_shift1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_shift1_prepare,
	.check_result = test_shift1_check,
	/* conditional jump instructions */
	.name = "test_jump1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_jump1_prog,
	.nb_ins = RTE_DIM(test_jump1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_jump1_prepare,
	.check_result = test_jump1_check,
	/* ALU operations; reuses jump1 prepare */
	.name = "test_alu1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_alu1_prog,
	.nb_ins = RTE_DIM(test_alu1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_jump1_prepare,
	.check_result = test_alu1_check,
	/* byte-order conversion (EBPF_END to_be/to_le) */
	.name = "test_bele1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_bele1_prog,
	.nb_ins = RTE_DIM(test_bele1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_bele1_prepare,
	.check_result = test_bele1_check,
	/* atomic add (BPF_XADD) */
	.name = "test_xadd1",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_xadd1_prog,
	.nb_ins = RTE_DIM(test_xadd1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.prepare = test_store1_prepare,
	.check_result = test_xadd1_check,
	/* divide/modulo operations; reuses mul1 prepare */
	.name = "test_div1",
	.arg_sz = sizeof(struct dummy_vect8),
	.ins = test_div1_prog,
	.nb_ins = RTE_DIM(test_div1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_vect8),
	.prepare = test_mul1_prepare,
	.check_result = test_div1_check,
	/* call into an external symbol (one helper) */
	.name = "test_call1",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_call1_prog,
	.nb_ins = RTE_DIM(test_call1_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.xsym = test_call1_xsym,
	.nb_xsym = RTE_DIM(test_call1_xsym),
	.prepare = test_load1_prepare,
	.check_result = test_call1_check,
	/* for now don't support function calls on 32 bit platform */
	.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
	/* call into an external symbol taking two pointer args */
	.name = "test_call2",
	.arg_sz = sizeof(struct dummy_offset),
	.ins = test_call2_prog,
	.nb_ins = RTE_DIM(test_call2_prog),
	.type = RTE_BPF_ARG_PTR,
	.size = sizeof(struct dummy_offset),
	.xsym = test_call2_xsym,
	.nb_xsym = RTE_DIM(test_call2_xsym),
	.prepare = test_store1_prepare,
	.check_result = test_call2_check,
	/* for now don't support function calls on 32 bit platform */
	.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
1974 run_test(const struct bpf_test *tst)
1978 struct rte_bpf *bpf;
1979 struct rte_bpf_jit jit;
1980 uint8_t tbuf[tst->arg_sz];
1982 printf("%s(%s) start\n", __func__, tst->name);
1984 bpf = rte_bpf_load(&tst->prm);
1986 printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
1987 __func__, __LINE__, rte_errno, strerror(rte_errno));
1993 rc = rte_bpf_exec(bpf, tbuf);
1994 ret = tst->check_result(rc, tbuf);
1996 printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
1997 __func__, __LINE__, tst->name, ret, strerror(ret));
2000 rte_bpf_get_jit(bpf, &jit);
2001 if (jit.func == NULL)
2005 rc = jit.func(tbuf);
2006 rv = tst->check_result(rc, tbuf);
2009 printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
2010 __func__, __LINE__, tst->name, rv, strerror(ret));
2013 rte_bpf_destroy(bpf);
	/*
	 * Iterate over every test case; failures from entries flagged
	 * allow_fail (e.g. call tests on 32-bit platforms) are reported
	 * but do not fail the overall run.
	 * NOTE(review): the enclosing function header and the loop tail
	 * are elided in this chunk.
	 */
	for (i = 0; i != RTE_DIM(tests); i++) {
		rv = run_test(tests + i);
		if (tests[i].allow_fail == 0)
/* register with the DPDK test framework as the "bpf_autotest" command */
REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);