1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
10 #include <rte_memory.h>
11 #include <rte_debug.h>
12 #include <rte_hexdump.h>
13 #include <rte_random.h>
14 #include <rte_byteorder.h>
15 #include <rte_errno.h>
17 #include <rte_ether.h>
23 * Basic functional tests for librte_bpf.
24 * The main procedure - load eBPF program, execute it and
25 * compare results with expected values.
36 struct dummy_offset in[8];
37 struct dummy_offset out[8];
41 struct rte_ether_hdr eth_hdr;
42 struct rte_vlan_hdr vlan_hdr;
43 struct rte_ipv4_hdr ip_hdr;
46 #define DUMMY_MBUF_NUM 2
48 /* first mbuf in the packet, should always be at offset 0 */
50 struct rte_mbuf mb[DUMMY_MBUF_NUM];
51 uint8_t buf[DUMMY_MBUF_NUM][RTE_MBUF_DEFAULT_BUF_SIZE];
54 #define TEST_FILL_1 0xDEADBEEF
57 #define TEST_MUL_2 -100
59 #define TEST_SHIFT_1 15
60 #define TEST_SHIFT_2 33
63 #define TEST_JCC_2 -123
64 #define TEST_JCC_3 5678
65 #define TEST_JCC_4 TEST_FILL_1
67 #define TEST_IMM_1 UINT64_MAX
68 #define TEST_IMM_2 ((uint64_t)INT64_MIN)
69 #define TEST_IMM_3 ((uint64_t)INT64_MAX + INT32_MAX)
70 #define TEST_IMM_4 ((uint64_t)UINT32_MAX)
71 #define TEST_IMM_5 ((uint64_t)UINT32_MAX + 1)
73 #define TEST_MEMFROB 0x2a2a2a2a
75 #define STRING_GEEK 0x6B656567
76 #define STRING_WEEK 0x6B656577
78 #define TEST_NETMASK 0xffffff00
79 #define TEST_SUBNET 0xaca80200
81 uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
82 uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
84 uint32_t ip_src_addr = (172U << 24) | (168U << 16) | (2 << 8) | 1;
85 uint32_t ip_dst_addr = (172U << 24) | (168U << 16) | (2 << 8) | 2;
90 struct rte_bpf_prm prm;
91 void (*prepare)(void *);
92 int (*check_result)(uint64_t, const void *);
97 * Compare return value and result data with expected ones.
98 * Report a failure if they don't match.
101 cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
102 const void *exp_res, const void *ret_res, size_t res_sz)
107 if (exp_rc != ret_rc) {
108 printf("%s@%d: invalid return value, expected: 0x%" PRIx64
109 ",result: 0x%" PRIx64 "\n",
110 func, __LINE__, exp_rc, ret_rc);
114 if (memcmp(exp_res, ret_res, res_sz) != 0) {
115 printf("%s: invalid value\n", func);
116 rte_memdump(stdout, "expected", exp_res, res_sz);
117 rte_memdump(stdout, "result", ret_res, res_sz);
124 /* store immediate test-cases */
125 static const struct ebpf_insn test_store1_prog[] = {
127 .code = (BPF_ST | BPF_MEM | BPF_B),
128 .dst_reg = EBPF_REG_1,
129 .off = offsetof(struct dummy_offset, u8),
133 .code = (BPF_ST | BPF_MEM | BPF_H),
134 .dst_reg = EBPF_REG_1,
135 .off = offsetof(struct dummy_offset, u16),
139 .code = (BPF_ST | BPF_MEM | BPF_W),
140 .dst_reg = EBPF_REG_1,
141 .off = offsetof(struct dummy_offset, u32),
145 .code = (BPF_ST | BPF_MEM | EBPF_DW),
146 .dst_reg = EBPF_REG_1,
147 .off = offsetof(struct dummy_offset, u64),
152 .code = (BPF_ALU | EBPF_MOV | BPF_K),
153 .dst_reg = EBPF_REG_0,
157 .code = (BPF_JMP | EBPF_EXIT),
162 test_store1_prepare(void *arg)
164 struct dummy_offset *df;
167 memset(df, 0, sizeof(*df));
171 test_store1_check(uint64_t rc, const void *arg)
173 const struct dummy_offset *dft;
174 struct dummy_offset dfe;
178 memset(&dfe, 0, sizeof(dfe));
179 dfe.u64 = (int32_t)TEST_FILL_1;
184 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
187 /* store register test-cases */
188 static const struct ebpf_insn test_store2_prog[] = {
191 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
192 .dst_reg = EBPF_REG_2,
196 .code = (BPF_STX | BPF_MEM | BPF_B),
197 .dst_reg = EBPF_REG_1,
198 .src_reg = EBPF_REG_2,
199 .off = offsetof(struct dummy_offset, u8),
202 .code = (BPF_STX | BPF_MEM | BPF_H),
203 .dst_reg = EBPF_REG_1,
204 .src_reg = EBPF_REG_2,
205 .off = offsetof(struct dummy_offset, u16),
208 .code = (BPF_STX | BPF_MEM | BPF_W),
209 .dst_reg = EBPF_REG_1,
210 .src_reg = EBPF_REG_2,
211 .off = offsetof(struct dummy_offset, u32),
214 .code = (BPF_STX | BPF_MEM | EBPF_DW),
215 .dst_reg = EBPF_REG_1,
216 .src_reg = EBPF_REG_2,
217 .off = offsetof(struct dummy_offset, u64),
221 .code = (BPF_ALU | EBPF_MOV | BPF_K),
222 .dst_reg = EBPF_REG_0,
226 .code = (BPF_JMP | EBPF_EXIT),
230 /* load test-cases */
231 static const struct ebpf_insn test_load1_prog[] = {
234 .code = (BPF_LDX | BPF_MEM | BPF_B),
235 .dst_reg = EBPF_REG_2,
236 .src_reg = EBPF_REG_1,
237 .off = offsetof(struct dummy_offset, u8),
240 .code = (BPF_LDX | BPF_MEM | BPF_H),
241 .dst_reg = EBPF_REG_3,
242 .src_reg = EBPF_REG_1,
243 .off = offsetof(struct dummy_offset, u16),
246 .code = (BPF_LDX | BPF_MEM | BPF_W),
247 .dst_reg = EBPF_REG_4,
248 .src_reg = EBPF_REG_1,
249 .off = offsetof(struct dummy_offset, u32),
252 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
253 .dst_reg = EBPF_REG_0,
254 .src_reg = EBPF_REG_1,
255 .off = offsetof(struct dummy_offset, u64),
259 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
260 .dst_reg = EBPF_REG_0,
261 .src_reg = EBPF_REG_4,
264 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
265 .dst_reg = EBPF_REG_0,
266 .src_reg = EBPF_REG_3,
269 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
270 .dst_reg = EBPF_REG_0,
271 .src_reg = EBPF_REG_2,
274 .code = (BPF_JMP | EBPF_EXIT),
279 test_load1_prepare(void *arg)
281 struct dummy_offset *df;
285 memset(df, 0, sizeof(*df));
286 df->u64 = (int32_t)TEST_FILL_1;
293 test_load1_check(uint64_t rc, const void *arg)
296 const struct dummy_offset *dft;
304 return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
307 /* load immediate test-cases */
308 static const struct ebpf_insn test_ldimm1_prog[] = {
311 .code = (BPF_LD | BPF_IMM | EBPF_DW),
312 .dst_reg = EBPF_REG_0,
313 .imm = (uint32_t)TEST_IMM_1,
316 .imm = TEST_IMM_1 >> 32,
319 .code = (BPF_LD | BPF_IMM | EBPF_DW),
320 .dst_reg = EBPF_REG_3,
321 .imm = (uint32_t)TEST_IMM_2,
324 .imm = TEST_IMM_2 >> 32,
327 .code = (BPF_LD | BPF_IMM | EBPF_DW),
328 .dst_reg = EBPF_REG_5,
329 .imm = (uint32_t)TEST_IMM_3,
332 .imm = TEST_IMM_3 >> 32,
335 .code = (BPF_LD | BPF_IMM | EBPF_DW),
336 .dst_reg = EBPF_REG_7,
337 .imm = (uint32_t)TEST_IMM_4,
340 .imm = TEST_IMM_4 >> 32,
343 .code = (BPF_LD | BPF_IMM | EBPF_DW),
344 .dst_reg = EBPF_REG_9,
345 .imm = (uint32_t)TEST_IMM_5,
348 .imm = TEST_IMM_5 >> 32,
352 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
353 .dst_reg = EBPF_REG_0,
354 .src_reg = EBPF_REG_3,
357 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
358 .dst_reg = EBPF_REG_0,
359 .src_reg = EBPF_REG_5,
362 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
363 .dst_reg = EBPF_REG_0,
364 .src_reg = EBPF_REG_7,
367 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
368 .dst_reg = EBPF_REG_0,
369 .src_reg = EBPF_REG_9,
372 .code = (BPF_JMP | EBPF_EXIT),
377 test_ldimm1_check(uint64_t rc, const void *arg)
391 return cmp_res(__func__, v1, rc, arg, arg, 0);
395 /* alu mul test-cases */
396 static const struct ebpf_insn test_mul1_prog[] = {
399 .code = (BPF_LDX | BPF_MEM | BPF_W),
400 .dst_reg = EBPF_REG_2,
401 .src_reg = EBPF_REG_1,
402 .off = offsetof(struct dummy_vect8, in[0].u32),
405 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
406 .dst_reg = EBPF_REG_3,
407 .src_reg = EBPF_REG_1,
408 .off = offsetof(struct dummy_vect8, in[1].u64),
411 .code = (BPF_LDX | BPF_MEM | BPF_W),
412 .dst_reg = EBPF_REG_4,
413 .src_reg = EBPF_REG_1,
414 .off = offsetof(struct dummy_vect8, in[2].u32),
417 .code = (BPF_ALU | BPF_MUL | BPF_K),
418 .dst_reg = EBPF_REG_2,
422 .code = (EBPF_ALU64 | BPF_MUL | BPF_K),
423 .dst_reg = EBPF_REG_3,
427 .code = (BPF_ALU | BPF_MUL | BPF_X),
428 .dst_reg = EBPF_REG_4,
429 .src_reg = EBPF_REG_2,
432 .code = (EBPF_ALU64 | BPF_MUL | BPF_X),
433 .dst_reg = EBPF_REG_4,
434 .src_reg = EBPF_REG_3,
437 .code = (BPF_STX | BPF_MEM | EBPF_DW),
438 .dst_reg = EBPF_REG_1,
439 .src_reg = EBPF_REG_2,
440 .off = offsetof(struct dummy_vect8, out[0].u64),
443 .code = (BPF_STX | BPF_MEM | EBPF_DW),
444 .dst_reg = EBPF_REG_1,
445 .src_reg = EBPF_REG_3,
446 .off = offsetof(struct dummy_vect8, out[1].u64),
449 .code = (BPF_STX | BPF_MEM | EBPF_DW),
450 .dst_reg = EBPF_REG_1,
451 .src_reg = EBPF_REG_4,
452 .off = offsetof(struct dummy_vect8, out[2].u64),
456 .code = (BPF_ALU | EBPF_MOV | BPF_K),
457 .dst_reg = EBPF_REG_0,
461 .code = (BPF_JMP | EBPF_EXIT),
466 test_mul1_prepare(void *arg)
468 struct dummy_vect8 *dv;
475 memset(dv, 0, sizeof(*dv));
477 dv->in[1].u64 = v << 12 | v >> 6;
482 test_mul1_check(uint64_t rc, const void *arg)
485 const struct dummy_vect8 *dvt;
486 struct dummy_vect8 dve;
489 memset(&dve, 0, sizeof(dve));
495 r2 = (uint32_t)r2 * TEST_MUL_1;
497 r4 = (uint32_t)(r4 * r2);
504 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
507 /* alu shift test-cases */
508 static const struct ebpf_insn test_shift1_prog[] = {
511 .code = (BPF_LDX | BPF_MEM | BPF_W),
512 .dst_reg = EBPF_REG_2,
513 .src_reg = EBPF_REG_1,
514 .off = offsetof(struct dummy_vect8, in[0].u32),
517 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
518 .dst_reg = EBPF_REG_3,
519 .src_reg = EBPF_REG_1,
520 .off = offsetof(struct dummy_vect8, in[1].u64),
523 .code = (BPF_LDX | BPF_MEM | BPF_W),
524 .dst_reg = EBPF_REG_4,
525 .src_reg = EBPF_REG_1,
526 .off = offsetof(struct dummy_vect8, in[2].u32),
529 .code = (BPF_ALU | BPF_LSH | BPF_K),
530 .dst_reg = EBPF_REG_2,
534 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
535 .dst_reg = EBPF_REG_3,
539 .code = (BPF_STX | BPF_MEM | EBPF_DW),
540 .dst_reg = EBPF_REG_1,
541 .src_reg = EBPF_REG_2,
542 .off = offsetof(struct dummy_vect8, out[0].u64),
545 .code = (BPF_STX | BPF_MEM | EBPF_DW),
546 .dst_reg = EBPF_REG_1,
547 .src_reg = EBPF_REG_3,
548 .off = offsetof(struct dummy_vect8, out[1].u64),
551 .code = (BPF_ALU | BPF_RSH | BPF_X),
552 .dst_reg = EBPF_REG_2,
553 .src_reg = EBPF_REG_4,
556 .code = (EBPF_ALU64 | BPF_LSH | BPF_X),
557 .dst_reg = EBPF_REG_3,
558 .src_reg = EBPF_REG_4,
561 .code = (BPF_STX | BPF_MEM | EBPF_DW),
562 .dst_reg = EBPF_REG_1,
563 .src_reg = EBPF_REG_2,
564 .off = offsetof(struct dummy_vect8, out[2].u64),
567 .code = (BPF_STX | BPF_MEM | EBPF_DW),
568 .dst_reg = EBPF_REG_1,
569 .src_reg = EBPF_REG_3,
570 .off = offsetof(struct dummy_vect8, out[3].u64),
573 .code = (BPF_LDX | BPF_MEM | BPF_W),
574 .dst_reg = EBPF_REG_2,
575 .src_reg = EBPF_REG_1,
576 .off = offsetof(struct dummy_vect8, in[0].u32),
579 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
580 .dst_reg = EBPF_REG_3,
581 .src_reg = EBPF_REG_1,
582 .off = offsetof(struct dummy_vect8, in[1].u64),
585 .code = (BPF_LDX | BPF_MEM | BPF_W),
586 .dst_reg = EBPF_REG_4,
587 .src_reg = EBPF_REG_1,
588 .off = offsetof(struct dummy_vect8, in[2].u32),
591 .code = (BPF_ALU | BPF_AND | BPF_K),
592 .dst_reg = EBPF_REG_2,
593 .imm = sizeof(uint64_t) * CHAR_BIT - 1,
596 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
597 .dst_reg = EBPF_REG_3,
598 .src_reg = EBPF_REG_2,
601 .code = (BPF_ALU | BPF_AND | BPF_K),
602 .dst_reg = EBPF_REG_2,
603 .imm = sizeof(uint32_t) * CHAR_BIT - 1,
606 .code = (BPF_ALU | BPF_LSH | BPF_X),
607 .dst_reg = EBPF_REG_4,
608 .src_reg = EBPF_REG_2,
611 .code = (BPF_STX | BPF_MEM | EBPF_DW),
612 .dst_reg = EBPF_REG_1,
613 .src_reg = EBPF_REG_4,
614 .off = offsetof(struct dummy_vect8, out[4].u64),
617 .code = (BPF_STX | BPF_MEM | EBPF_DW),
618 .dst_reg = EBPF_REG_1,
619 .src_reg = EBPF_REG_3,
620 .off = offsetof(struct dummy_vect8, out[5].u64),
624 .code = (BPF_ALU | EBPF_MOV | BPF_K),
625 .dst_reg = EBPF_REG_0,
629 .code = (BPF_JMP | EBPF_EXIT),
634 test_shift1_prepare(void *arg)
636 struct dummy_vect8 *dv;
643 memset(dv, 0, sizeof(*dv));
645 dv->in[1].u64 = v << 12 | v >> 6;
646 dv->in[2].u32 = (-v ^ 5);
650 test_shift1_check(uint64_t rc, const void *arg)
653 const struct dummy_vect8 *dvt;
654 struct dummy_vect8 dve;
657 memset(&dve, 0, sizeof(dve));
663 r2 = (uint32_t)r2 << TEST_SHIFT_1;
664 r3 = (int64_t)r3 >> TEST_SHIFT_2;
669 r2 = (uint32_t)r2 >> r4;
679 r2 &= sizeof(uint64_t) * CHAR_BIT - 1;
680 r3 = (int64_t)r3 >> r2;
681 r2 &= sizeof(uint32_t) * CHAR_BIT - 1;
682 r4 = (uint32_t)r4 << r2;
687 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
691 static const struct ebpf_insn test_jump1_prog[] = {
694 .code = (BPF_ALU | EBPF_MOV | BPF_K),
695 .dst_reg = EBPF_REG_0,
699 .code = (BPF_LDX | BPF_MEM | BPF_W),
700 .dst_reg = EBPF_REG_2,
701 .src_reg = EBPF_REG_1,
702 .off = offsetof(struct dummy_vect8, in[0].u32),
705 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
706 .dst_reg = EBPF_REG_3,
707 .src_reg = EBPF_REG_1,
708 .off = offsetof(struct dummy_vect8, in[0].u64),
711 .code = (BPF_LDX | BPF_MEM | BPF_W),
712 .dst_reg = EBPF_REG_4,
713 .src_reg = EBPF_REG_1,
714 .off = offsetof(struct dummy_vect8, in[1].u32),
717 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
718 .dst_reg = EBPF_REG_5,
719 .src_reg = EBPF_REG_1,
720 .off = offsetof(struct dummy_vect8, in[1].u64),
723 .code = (BPF_JMP | BPF_JEQ | BPF_K),
724 .dst_reg = EBPF_REG_2,
729 .code = (BPF_JMP | EBPF_JSLE | BPF_K),
730 .dst_reg = EBPF_REG_3,
735 .code = (BPF_JMP | BPF_JGT | BPF_K),
736 .dst_reg = EBPF_REG_4,
741 .code = (BPF_JMP | BPF_JSET | BPF_K),
742 .dst_reg = EBPF_REG_5,
747 .code = (BPF_JMP | EBPF_JNE | BPF_X),
748 .dst_reg = EBPF_REG_2,
749 .src_reg = EBPF_REG_3,
753 .code = (BPF_JMP | EBPF_JSGT | BPF_X),
754 .dst_reg = EBPF_REG_2,
755 .src_reg = EBPF_REG_4,
759 .code = (BPF_JMP | EBPF_JLE | BPF_X),
760 .dst_reg = EBPF_REG_2,
761 .src_reg = EBPF_REG_5,
765 .code = (BPF_JMP | BPF_JSET | BPF_X),
766 .dst_reg = EBPF_REG_3,
767 .src_reg = EBPF_REG_5,
771 .code = (BPF_JMP | EBPF_EXIT),
774 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
775 .dst_reg = EBPF_REG_0,
779 .code = (BPF_JMP | BPF_JA),
783 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
784 .dst_reg = EBPF_REG_0,
788 .code = (BPF_JMP | BPF_JA),
792 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
793 .dst_reg = EBPF_REG_0,
797 .code = (BPF_JMP | BPF_JA),
801 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
802 .dst_reg = EBPF_REG_0,
806 .code = (BPF_JMP | BPF_JA),
810 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
811 .dst_reg = EBPF_REG_0,
815 .code = (BPF_JMP | BPF_JA),
819 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
820 .dst_reg = EBPF_REG_0,
824 .code = (BPF_JMP | BPF_JA),
828 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
829 .dst_reg = EBPF_REG_0,
833 .code = (BPF_JMP | BPF_JA),
837 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
838 .dst_reg = EBPF_REG_0,
842 .code = (BPF_JMP | BPF_JA),
848 test_jump1_prepare(void *arg)
850 struct dummy_vect8 *dv;
858 memset(dv, 0, sizeof(*dv));
861 dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
862 dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
866 test_jump1_check(uint64_t rc, const void *arg)
868 uint64_t r2, r3, r4, r5, rv;
869 const struct dummy_vect8 *dvt;
879 if (r2 == TEST_JCC_1)
881 if ((int64_t)r3 <= TEST_JCC_2)
889 if ((int64_t)r2 > (int64_t)r4)
896 return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
899 /* Jump test case - check ip4_dest in particular subnet */
900 static const struct ebpf_insn test_jump2_prog[] = {
903 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
904 .dst_reg = EBPF_REG_2,
908 .code = (BPF_LDX | BPF_MEM | BPF_H),
909 .dst_reg = EBPF_REG_3,
910 .src_reg = EBPF_REG_1,
914 .code = (BPF_JMP | EBPF_JNE | BPF_K),
915 .dst_reg = EBPF_REG_3,
920 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
921 .dst_reg = EBPF_REG_2,
925 .code = (BPF_LDX | BPF_MEM | BPF_H),
926 .dst_reg = EBPF_REG_3,
927 .src_reg = EBPF_REG_1,
931 .code = (EBPF_ALU64 | BPF_AND | BPF_K),
932 .dst_reg = EBPF_REG_3,
936 .code = (BPF_JMP | EBPF_JNE | BPF_K),
937 .dst_reg = EBPF_REG_3,
942 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
943 .dst_reg = EBPF_REG_1,
944 .src_reg = EBPF_REG_2,
947 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
948 .dst_reg = EBPF_REG_0,
952 .code = (BPF_LDX | BPF_MEM | BPF_W),
953 .dst_reg = EBPF_REG_1,
954 .src_reg = EBPF_REG_1,
958 .code = (BPF_ALU | EBPF_MOV | BPF_K),
959 .dst_reg = EBPF_REG_3,
963 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
964 .dst_reg = EBPF_REG_3,
965 .imm = sizeof(uint32_t) * CHAR_BIT,
968 .code = (BPF_ALU | BPF_AND | BPF_X),
969 .dst_reg = EBPF_REG_1,
970 .src_reg = EBPF_REG_3,
973 .code = (BPF_ALU | EBPF_MOV | BPF_K),
974 .dst_reg = EBPF_REG_3,
978 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
979 .dst_reg = EBPF_REG_3,
980 .imm = sizeof(uint32_t) * CHAR_BIT,
983 .code = (BPF_JMP | BPF_JEQ | BPF_X),
984 .dst_reg = EBPF_REG_1,
985 .src_reg = EBPF_REG_3,
989 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
990 .dst_reg = EBPF_REG_0,
994 .code = (BPF_JMP | EBPF_EXIT),
998 /* Preparing a vlan packet */
1000 test_jump2_prepare(void *arg)
1002 struct dummy_net *dn;
1005 memset(dn, 0, sizeof(*dn));
1008 * Initialize ether header.
1010 rte_ether_addr_copy((struct rte_ether_addr *)dst_mac,
1011 &dn->eth_hdr.d_addr);
1012 rte_ether_addr_copy((struct rte_ether_addr *)src_mac,
1013 &dn->eth_hdr.s_addr);
1014 dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1017 * Initialize vlan header.
1019 dn->vlan_hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1020 dn->vlan_hdr.vlan_tci = 32;
1023 * Initialize IP header.
1025 dn->ip_hdr.version_ihl = 0x45; /*IP_VERSION | IP_HDRLEN*/
1026 dn->ip_hdr.time_to_live = 64; /* IP_DEFTTL */
1027 dn->ip_hdr.next_proto_id = IPPROTO_TCP;
1028 dn->ip_hdr.packet_id = rte_cpu_to_be_16(0x463c);
1029 dn->ip_hdr.total_length = rte_cpu_to_be_16(60);
1030 dn->ip_hdr.src_addr = rte_cpu_to_be_32(ip_src_addr);
1031 dn->ip_hdr.dst_addr = rte_cpu_to_be_32(ip_dst_addr);
1035 test_jump2_check(uint64_t rc, const void *arg)
1037 const struct rte_ether_hdr *eth_hdr = arg;
1038 const struct rte_ipv4_hdr *ipv4_hdr;
1039 const void *next = eth_hdr;
1043 if (eth_hdr->ether_type == htons(0x8100)) {
1044 const struct rte_vlan_hdr *vlan_hdr =
1045 (const void *)(eth_hdr + 1);
1046 eth_type = vlan_hdr->eth_proto;
1047 next = vlan_hdr + 1;
1049 eth_type = eth_hdr->ether_type;
1053 if (eth_type == htons(0x0800)) {
1055 if ((ipv4_hdr->dst_addr & rte_cpu_to_be_32(TEST_NETMASK)) ==
1056 rte_cpu_to_be_32(TEST_SUBNET)) {
1061 return cmp_res(__func__, v, rc, arg, arg, sizeof(arg));
1064 /* alu (add, sub, and, or, xor, neg) test-cases */
1065 static const struct ebpf_insn test_alu1_prog[] = {
1068 .code = (BPF_LDX | BPF_MEM | BPF_W),
1069 .dst_reg = EBPF_REG_2,
1070 .src_reg = EBPF_REG_1,
1071 .off = offsetof(struct dummy_vect8, in[0].u32),
1074 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1075 .dst_reg = EBPF_REG_3,
1076 .src_reg = EBPF_REG_1,
1077 .off = offsetof(struct dummy_vect8, in[0].u64),
1080 .code = (BPF_LDX | BPF_MEM | BPF_W),
1081 .dst_reg = EBPF_REG_4,
1082 .src_reg = EBPF_REG_1,
1083 .off = offsetof(struct dummy_vect8, in[1].u32),
1086 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1087 .dst_reg = EBPF_REG_5,
1088 .src_reg = EBPF_REG_1,
1089 .off = offsetof(struct dummy_vect8, in[1].u64),
1092 .code = (BPF_ALU | BPF_AND | BPF_K),
1093 .dst_reg = EBPF_REG_2,
1097 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1098 .dst_reg = EBPF_REG_3,
1102 .code = (BPF_ALU | BPF_XOR | BPF_K),
1103 .dst_reg = EBPF_REG_4,
1107 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1108 .dst_reg = EBPF_REG_5,
1112 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1113 .dst_reg = EBPF_REG_1,
1114 .src_reg = EBPF_REG_2,
1115 .off = offsetof(struct dummy_vect8, out[0].u64),
1118 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1119 .dst_reg = EBPF_REG_1,
1120 .src_reg = EBPF_REG_3,
1121 .off = offsetof(struct dummy_vect8, out[1].u64),
1124 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1125 .dst_reg = EBPF_REG_1,
1126 .src_reg = EBPF_REG_4,
1127 .off = offsetof(struct dummy_vect8, out[2].u64),
1130 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1131 .dst_reg = EBPF_REG_1,
1132 .src_reg = EBPF_REG_5,
1133 .off = offsetof(struct dummy_vect8, out[3].u64),
1136 .code = (BPF_ALU | BPF_OR | BPF_X),
1137 .dst_reg = EBPF_REG_2,
1138 .src_reg = EBPF_REG_3,
1141 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
1142 .dst_reg = EBPF_REG_3,
1143 .src_reg = EBPF_REG_4,
1146 .code = (BPF_ALU | BPF_SUB | BPF_X),
1147 .dst_reg = EBPF_REG_4,
1148 .src_reg = EBPF_REG_5,
1151 .code = (EBPF_ALU64 | BPF_AND | BPF_X),
1152 .dst_reg = EBPF_REG_5,
1153 .src_reg = EBPF_REG_2,
1156 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1157 .dst_reg = EBPF_REG_1,
1158 .src_reg = EBPF_REG_2,
1159 .off = offsetof(struct dummy_vect8, out[4].u64),
1162 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1163 .dst_reg = EBPF_REG_1,
1164 .src_reg = EBPF_REG_3,
1165 .off = offsetof(struct dummy_vect8, out[5].u64),
1168 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1169 .dst_reg = EBPF_REG_1,
1170 .src_reg = EBPF_REG_4,
1171 .off = offsetof(struct dummy_vect8, out[6].u64),
1174 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1175 .dst_reg = EBPF_REG_1,
1176 .src_reg = EBPF_REG_5,
1177 .off = offsetof(struct dummy_vect8, out[7].u64),
1179 /* return (-r2 + (-r3)) */
1181 .code = (BPF_ALU | BPF_NEG),
1182 .dst_reg = EBPF_REG_2,
1185 .code = (EBPF_ALU64 | BPF_NEG),
1186 .dst_reg = EBPF_REG_3,
1189 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1190 .dst_reg = EBPF_REG_2,
1191 .src_reg = EBPF_REG_3,
1194 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1195 .dst_reg = EBPF_REG_0,
1196 .src_reg = EBPF_REG_2,
1199 .code = (BPF_JMP | EBPF_EXIT),
1204 test_alu1_check(uint64_t rc, const void *arg)
1206 uint64_t r2, r3, r4, r5, rv;
1207 const struct dummy_vect8 *dvt;
1208 struct dummy_vect8 dve;
1211 memset(&dve, 0, sizeof(dve));
1213 r2 = dvt->in[0].u32;
1214 r3 = dvt->in[0].u64;
1215 r4 = dvt->in[1].u32;
1216 r5 = dvt->in[1].u64;
1218 r2 = (uint32_t)r2 & TEST_FILL_1;
1219 r3 |= (int32_t) TEST_FILL_1;
1220 r4 = (uint32_t)r4 ^ TEST_FILL_1;
1221 r5 += (int32_t)TEST_FILL_1;
1223 dve.out[0].u64 = r2;
1224 dve.out[1].u64 = r3;
1225 dve.out[2].u64 = r4;
1226 dve.out[3].u64 = r5;
1228 r2 = (uint32_t)r2 | (uint32_t)r3;
1230 r4 = (uint32_t)r4 - (uint32_t)r5;
1233 dve.out[4].u64 = r2;
1234 dve.out[5].u64 = r3;
1235 dve.out[6].u64 = r4;
1236 dve.out[7].u64 = r5;
1243 return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
1246 /* endianness conversions (BE->LE/LE->BE) test-cases */
1247 static const struct ebpf_insn test_bele1_prog[] = {
1250 .code = (BPF_LDX | BPF_MEM | BPF_H),
1251 .dst_reg = EBPF_REG_2,
1252 .src_reg = EBPF_REG_1,
1253 .off = offsetof(struct dummy_vect8, in[0].u16),
1256 .code = (BPF_LDX | BPF_MEM | BPF_W),
1257 .dst_reg = EBPF_REG_3,
1258 .src_reg = EBPF_REG_1,
1259 .off = offsetof(struct dummy_vect8, in[0].u32),
1262 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1263 .dst_reg = EBPF_REG_4,
1264 .src_reg = EBPF_REG_1,
1265 .off = offsetof(struct dummy_vect8, in[0].u64),
1268 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1269 .dst_reg = EBPF_REG_2,
1270 .imm = sizeof(uint16_t) * CHAR_BIT,
1273 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1274 .dst_reg = EBPF_REG_3,
1275 .imm = sizeof(uint32_t) * CHAR_BIT,
1278 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
1279 .dst_reg = EBPF_REG_4,
1280 .imm = sizeof(uint64_t) * CHAR_BIT,
1283 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1284 .dst_reg = EBPF_REG_1,
1285 .src_reg = EBPF_REG_2,
1286 .off = offsetof(struct dummy_vect8, out[0].u64),
1289 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1290 .dst_reg = EBPF_REG_1,
1291 .src_reg = EBPF_REG_3,
1292 .off = offsetof(struct dummy_vect8, out[1].u64),
1295 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1296 .dst_reg = EBPF_REG_1,
1297 .src_reg = EBPF_REG_4,
1298 .off = offsetof(struct dummy_vect8, out[2].u64),
1301 .code = (BPF_LDX | BPF_MEM | BPF_H),
1302 .dst_reg = EBPF_REG_2,
1303 .src_reg = EBPF_REG_1,
1304 .off = offsetof(struct dummy_vect8, in[0].u16),
1307 .code = (BPF_LDX | BPF_MEM | BPF_W),
1308 .dst_reg = EBPF_REG_3,
1309 .src_reg = EBPF_REG_1,
1310 .off = offsetof(struct dummy_vect8, in[0].u32),
1313 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1314 .dst_reg = EBPF_REG_4,
1315 .src_reg = EBPF_REG_1,
1316 .off = offsetof(struct dummy_vect8, in[0].u64),
1319 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1320 .dst_reg = EBPF_REG_2,
1321 .imm = sizeof(uint16_t) * CHAR_BIT,
1324 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1325 .dst_reg = EBPF_REG_3,
1326 .imm = sizeof(uint32_t) * CHAR_BIT,
1329 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
1330 .dst_reg = EBPF_REG_4,
1331 .imm = sizeof(uint64_t) * CHAR_BIT,
1334 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1335 .dst_reg = EBPF_REG_1,
1336 .src_reg = EBPF_REG_2,
1337 .off = offsetof(struct dummy_vect8, out[3].u64),
1340 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1341 .dst_reg = EBPF_REG_1,
1342 .src_reg = EBPF_REG_3,
1343 .off = offsetof(struct dummy_vect8, out[4].u64),
1346 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1347 .dst_reg = EBPF_REG_1,
1348 .src_reg = EBPF_REG_4,
1349 .off = offsetof(struct dummy_vect8, out[5].u64),
1353 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1354 .dst_reg = EBPF_REG_0,
1358 .code = (BPF_JMP | EBPF_EXIT),
1363 test_bele1_prepare(void *arg)
1365 struct dummy_vect8 *dv;
1369 memset(dv, 0, sizeof(*dv));
1370 dv->in[0].u64 = rte_rand();
1371 dv->in[0].u32 = dv->in[0].u64;
1372 dv->in[0].u16 = dv->in[0].u64;
1376 test_bele1_check(uint64_t rc, const void *arg)
1378 uint64_t r2, r3, r4;
1379 const struct dummy_vect8 *dvt;
1380 struct dummy_vect8 dve;
1383 memset(&dve, 0, sizeof(dve));
1385 r2 = dvt->in[0].u16;
1386 r3 = dvt->in[0].u32;
1387 r4 = dvt->in[0].u64;
1389 r2 = rte_cpu_to_be_16(r2);
1390 r3 = rte_cpu_to_be_32(r3);
1391 r4 = rte_cpu_to_be_64(r4);
1393 dve.out[0].u64 = r2;
1394 dve.out[1].u64 = r3;
1395 dve.out[2].u64 = r4;
1397 r2 = dvt->in[0].u16;
1398 r3 = dvt->in[0].u32;
1399 r4 = dvt->in[0].u64;
1401 r2 = rte_cpu_to_le_16(r2);
1402 r3 = rte_cpu_to_le_32(r3);
1403 r4 = rte_cpu_to_le_64(r4);
1405 dve.out[3].u64 = r2;
1406 dve.out[4].u64 = r3;
1407 dve.out[5].u64 = r4;
1409 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
1412 /* atomic add test-cases */
1413 static const struct ebpf_insn test_xadd1_prog[] = {
1416 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1417 .dst_reg = EBPF_REG_2,
1421 .code = (BPF_STX | EBPF_XADD | BPF_W),
1422 .dst_reg = EBPF_REG_1,
1423 .src_reg = EBPF_REG_2,
1424 .off = offsetof(struct dummy_offset, u32),
1427 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1428 .dst_reg = EBPF_REG_1,
1429 .src_reg = EBPF_REG_2,
1430 .off = offsetof(struct dummy_offset, u64),
1433 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1434 .dst_reg = EBPF_REG_3,
1438 .code = (BPF_STX | EBPF_XADD | BPF_W),
1439 .dst_reg = EBPF_REG_1,
1440 .src_reg = EBPF_REG_3,
1441 .off = offsetof(struct dummy_offset, u32),
1444 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1445 .dst_reg = EBPF_REG_1,
1446 .src_reg = EBPF_REG_3,
1447 .off = offsetof(struct dummy_offset, u64),
1450 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1451 .dst_reg = EBPF_REG_4,
1455 .code = (BPF_STX | EBPF_XADD | BPF_W),
1456 .dst_reg = EBPF_REG_1,
1457 .src_reg = EBPF_REG_4,
1458 .off = offsetof(struct dummy_offset, u32),
1461 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1462 .dst_reg = EBPF_REG_1,
1463 .src_reg = EBPF_REG_4,
1464 .off = offsetof(struct dummy_offset, u64),
1467 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1468 .dst_reg = EBPF_REG_5,
1472 .code = (BPF_STX | EBPF_XADD | BPF_W),
1473 .dst_reg = EBPF_REG_1,
1474 .src_reg = EBPF_REG_5,
1475 .off = offsetof(struct dummy_offset, u32),
1478 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1479 .dst_reg = EBPF_REG_1,
1480 .src_reg = EBPF_REG_5,
1481 .off = offsetof(struct dummy_offset, u64),
1484 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1485 .dst_reg = EBPF_REG_6,
1489 .code = (BPF_STX | EBPF_XADD | BPF_W),
1490 .dst_reg = EBPF_REG_1,
1491 .src_reg = EBPF_REG_6,
1492 .off = offsetof(struct dummy_offset, u32),
1495 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1496 .dst_reg = EBPF_REG_1,
1497 .src_reg = EBPF_REG_6,
1498 .off = offsetof(struct dummy_offset, u64),
1501 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1502 .dst_reg = EBPF_REG_7,
1506 .code = (BPF_STX | EBPF_XADD | BPF_W),
1507 .dst_reg = EBPF_REG_1,
1508 .src_reg = EBPF_REG_7,
1509 .off = offsetof(struct dummy_offset, u32),
1512 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1513 .dst_reg = EBPF_REG_1,
1514 .src_reg = EBPF_REG_7,
1515 .off = offsetof(struct dummy_offset, u64),
1518 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
1519 .dst_reg = EBPF_REG_8,
1523 .code = (BPF_STX | EBPF_XADD | BPF_W),
1524 .dst_reg = EBPF_REG_1,
1525 .src_reg = EBPF_REG_8,
1526 .off = offsetof(struct dummy_offset, u32),
1529 .code = (BPF_STX | EBPF_XADD | EBPF_DW),
1530 .dst_reg = EBPF_REG_1,
1531 .src_reg = EBPF_REG_8,
1532 .off = offsetof(struct dummy_offset, u64),
1536 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1537 .dst_reg = EBPF_REG_0,
1541 .code = (BPF_JMP | EBPF_EXIT),
1546 test_xadd1_check(uint64_t rc, const void *arg)
1549 const struct dummy_offset *dft;
1550 struct dummy_offset dfe;
1553 memset(&dfe, 0, sizeof(dfe));
1556 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1557 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1560 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1561 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1563 rv = (int32_t)TEST_FILL_1;
1564 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1565 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1568 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1569 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1572 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1573 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1576 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1577 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1580 rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
1581 rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
1583 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
1586 /* alu div test-cases */
1587 static const struct ebpf_insn test_div1_prog[] = {
1590 .code = (BPF_LDX | BPF_MEM | BPF_W),
1591 .dst_reg = EBPF_REG_2,
1592 .src_reg = EBPF_REG_1,
1593 .off = offsetof(struct dummy_vect8, in[0].u32),
1596 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1597 .dst_reg = EBPF_REG_3,
1598 .src_reg = EBPF_REG_1,
1599 .off = offsetof(struct dummy_vect8, in[1].u64),
1602 .code = (BPF_LDX | BPF_MEM | BPF_W),
1603 .dst_reg = EBPF_REG_4,
1604 .src_reg = EBPF_REG_1,
1605 .off = offsetof(struct dummy_vect8, in[2].u32),
1608 .code = (BPF_ALU | BPF_DIV | BPF_K),
1609 .dst_reg = EBPF_REG_2,
1613 .code = (EBPF_ALU64 | BPF_MOD | BPF_K),
1614 .dst_reg = EBPF_REG_3,
1618 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1619 .dst_reg = EBPF_REG_2,
1623 .code = (EBPF_ALU64 | BPF_OR | BPF_K),
1624 .dst_reg = EBPF_REG_3,
1628 .code = (BPF_ALU | BPF_MOD | BPF_X),
1629 .dst_reg = EBPF_REG_4,
1630 .src_reg = EBPF_REG_2,
1633 .code = (EBPF_ALU64 | BPF_DIV | BPF_X),
1634 .dst_reg = EBPF_REG_4,
1635 .src_reg = EBPF_REG_3,
1638 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1639 .dst_reg = EBPF_REG_1,
1640 .src_reg = EBPF_REG_2,
1641 .off = offsetof(struct dummy_vect8, out[0].u64),
1644 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1645 .dst_reg = EBPF_REG_1,
1646 .src_reg = EBPF_REG_3,
1647 .off = offsetof(struct dummy_vect8, out[1].u64),
1650 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1651 .dst_reg = EBPF_REG_1,
1652 .src_reg = EBPF_REG_4,
1653 .off = offsetof(struct dummy_vect8, out[2].u64),
1655 /* check that we can handle division by zero gracefully. */
1657 .code = (BPF_LDX | BPF_MEM | BPF_W),
1658 .dst_reg = EBPF_REG_2,
1659 .src_reg = EBPF_REG_1,
1660 .off = offsetof(struct dummy_vect8, in[3].u32),
1663 .code = (BPF_ALU | BPF_DIV | BPF_X),
1664 .dst_reg = EBPF_REG_4,
1665 .src_reg = EBPF_REG_2,
1669 .code = (BPF_ALU | EBPF_MOV | BPF_K),
1670 .dst_reg = EBPF_REG_0,
1674 .code = (BPF_JMP | EBPF_EXIT),
1679 test_div1_check(uint64_t rc, const void *arg)
1681 uint64_t r2, r3, r4;
1682 const struct dummy_vect8 *dvt;
1683 struct dummy_vect8 dve;
1686 memset(&dve, 0, sizeof(dve));
1688 r2 = dvt->in[0].u32;
1689 r3 = dvt->in[1].u64;
1690 r4 = dvt->in[2].u32;
1692 r2 = (uint32_t)r2 / TEST_MUL_1;
1696 r4 = (uint32_t)(r4 % r2);
1699 dve.out[0].u64 = r2;
1700 dve.out[1].u64 = r3;
1701 dve.out[2].u64 = r4;
1704 * in the test prog we attempted to divide by zero.
1705 * so the return value should be 0.
1707 return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
1710 /* call test-cases */
1711 static const struct ebpf_insn test_call1_prog[] = {
1714 .code = (BPF_LDX | BPF_MEM | BPF_W),
1715 .dst_reg = EBPF_REG_2,
1716 .src_reg = EBPF_REG_1,
1717 .off = offsetof(struct dummy_offset, u32),
1720 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1721 .dst_reg = EBPF_REG_3,
1722 .src_reg = EBPF_REG_1,
1723 .off = offsetof(struct dummy_offset, u64),
1726 .code = (BPF_STX | BPF_MEM | BPF_W),
1727 .dst_reg = EBPF_REG_10,
1728 .src_reg = EBPF_REG_2,
1732 .code = (BPF_STX | BPF_MEM | EBPF_DW),
1733 .dst_reg = EBPF_REG_10,
1734 .src_reg = EBPF_REG_3,
1738 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1739 .dst_reg = EBPF_REG_2,
1740 .src_reg = EBPF_REG_10,
1743 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1744 .dst_reg = EBPF_REG_2,
1748 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1749 .dst_reg = EBPF_REG_3,
1750 .src_reg = EBPF_REG_10,
1753 .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
1754 .dst_reg = EBPF_REG_3,
1758 .code = (BPF_JMP | EBPF_CALL),
1762 .code = (BPF_LDX | BPF_MEM | BPF_W),
1763 .dst_reg = EBPF_REG_2,
1764 .src_reg = EBPF_REG_10,
1768 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1769 .dst_reg = EBPF_REG_0,
1770 .src_reg = EBPF_REG_10,
1774 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1775 .dst_reg = EBPF_REG_0,
1776 .src_reg = EBPF_REG_2,
1779 .code = (BPF_JMP | EBPF_EXIT),
1784 dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
1786 const struct dummy_offset *dv;
1795 test_call1_check(uint64_t rc, const void *arg)
1799 const struct dummy_offset *dv;
1805 dummy_func1(arg, &v32, &v64);
1808 return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
1811 static const struct rte_bpf_xsym test_call1_xsym[] = {
1813 .name = RTE_STR(dummy_func1),
1814 .type = RTE_BPF_XTYPE_FUNC,
1816 .val = (void *)dummy_func1,
1820 .type = RTE_BPF_ARG_PTR,
1821 .size = sizeof(struct dummy_offset),
1824 .type = RTE_BPF_ARG_PTR,
1825 .size = sizeof(uint32_t),
1828 .type = RTE_BPF_ARG_PTR,
1829 .size = sizeof(uint64_t),
1836 static const struct ebpf_insn test_call2_prog[] = {
1839 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1840 .dst_reg = EBPF_REG_1,
1841 .src_reg = EBPF_REG_10,
1844 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1845 .dst_reg = EBPF_REG_1,
1846 .imm = -(int32_t)sizeof(struct dummy_offset),
1849 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
1850 .dst_reg = EBPF_REG_2,
1851 .src_reg = EBPF_REG_10,
1854 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
1855 .dst_reg = EBPF_REG_2,
1856 .imm = -2 * (int32_t)sizeof(struct dummy_offset),
1859 .code = (BPF_JMP | EBPF_CALL),
1863 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1864 .dst_reg = EBPF_REG_1,
1865 .src_reg = EBPF_REG_10,
1866 .off = -(int32_t)(sizeof(struct dummy_offset) -
1867 offsetof(struct dummy_offset, u64)),
1870 .code = (BPF_LDX | BPF_MEM | BPF_W),
1871 .dst_reg = EBPF_REG_0,
1872 .src_reg = EBPF_REG_10,
1873 .off = -(int32_t)(sizeof(struct dummy_offset) -
1874 offsetof(struct dummy_offset, u32)),
1877 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1878 .dst_reg = EBPF_REG_0,
1879 .src_reg = EBPF_REG_1,
1882 .code = (BPF_LDX | BPF_MEM | BPF_H),
1883 .dst_reg = EBPF_REG_1,
1884 .src_reg = EBPF_REG_10,
1885 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1886 offsetof(struct dummy_offset, u16)),
1889 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1890 .dst_reg = EBPF_REG_0,
1891 .src_reg = EBPF_REG_1,
1894 .code = (BPF_LDX | BPF_MEM | BPF_B),
1895 .dst_reg = EBPF_REG_1,
1896 .src_reg = EBPF_REG_10,
1897 .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
1898 offsetof(struct dummy_offset, u8)),
1901 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1902 .dst_reg = EBPF_REG_0,
1903 .src_reg = EBPF_REG_1,
1906 .code = (BPF_JMP | EBPF_EXIT),
1912 dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
1928 test_call2_check(uint64_t rc, const void *arg)
1931 struct dummy_offset a, b;
1935 dummy_func2(&a, &b);
1936 v = a.u64 + a.u32 + b.u16 + b.u8;
1938 return cmp_res(__func__, v, rc, arg, arg, 0);
1941 static const struct rte_bpf_xsym test_call2_xsym[] = {
1943 .name = RTE_STR(dummy_func2),
1944 .type = RTE_BPF_XTYPE_FUNC,
1946 .val = (void *)dummy_func2,
1950 .type = RTE_BPF_ARG_PTR,
1951 .size = sizeof(struct dummy_offset),
1954 .type = RTE_BPF_ARG_PTR,
1955 .size = sizeof(struct dummy_offset),
1962 static const struct ebpf_insn test_call3_prog[] = {
1965 .code = (BPF_JMP | EBPF_CALL),
1969 .code = (BPF_LDX | BPF_MEM | BPF_B),
1970 .dst_reg = EBPF_REG_2,
1971 .src_reg = EBPF_REG_0,
1972 .off = offsetof(struct dummy_offset, u8),
1975 .code = (BPF_LDX | BPF_MEM | BPF_H),
1976 .dst_reg = EBPF_REG_3,
1977 .src_reg = EBPF_REG_0,
1978 .off = offsetof(struct dummy_offset, u16),
1981 .code = (BPF_LDX | BPF_MEM | BPF_W),
1982 .dst_reg = EBPF_REG_4,
1983 .src_reg = EBPF_REG_0,
1984 .off = offsetof(struct dummy_offset, u32),
1987 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
1988 .dst_reg = EBPF_REG_0,
1989 .src_reg = EBPF_REG_0,
1990 .off = offsetof(struct dummy_offset, u64),
1994 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
1995 .dst_reg = EBPF_REG_0,
1996 .src_reg = EBPF_REG_4,
1999 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2000 .dst_reg = EBPF_REG_0,
2001 .src_reg = EBPF_REG_3,
2004 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2005 .dst_reg = EBPF_REG_0,
2006 .src_reg = EBPF_REG_2,
2009 .code = (BPF_JMP | EBPF_EXIT),
2013 static const struct dummy_offset *
2014 dummy_func3(const struct dummy_vect8 *p)
2016 return &p->in[RTE_DIM(p->in) - 1];
2020 test_call3_prepare(void *arg)
2022 struct dummy_vect8 *pv;
2023 struct dummy_offset *df;
2026 df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv);
2028 memset(pv, 0, sizeof(*pv));
2029 df->u64 = (int32_t)TEST_FILL_1;
2036 test_call3_check(uint64_t rc, const void *arg)
2039 const struct dummy_vect8 *pv;
2040 const struct dummy_offset *dft;
2043 dft = dummy_func3(pv);
2050 return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv));
2053 static const struct rte_bpf_xsym test_call3_xsym[] = {
2055 .name = RTE_STR(dummy_func3),
2056 .type = RTE_BPF_XTYPE_FUNC,
2058 .val = (void *)dummy_func3,
2062 .type = RTE_BPF_ARG_PTR,
2063 .size = sizeof(struct dummy_vect8),
2067 .type = RTE_BPF_ARG_PTR,
2068 .size = sizeof(struct dummy_offset),
2074 /* Test for stack corruption in multiple function calls */
2075 static const struct ebpf_insn test_call4_prog[] = {
2077 .code = (BPF_ST | BPF_MEM | BPF_B),
2078 .dst_reg = EBPF_REG_10,
2083 .code = (BPF_ST | BPF_MEM | BPF_B),
2084 .dst_reg = EBPF_REG_10,
2089 .code = (BPF_ST | BPF_MEM | BPF_B),
2090 .dst_reg = EBPF_REG_10,
2095 .code = (BPF_ST | BPF_MEM | BPF_B),
2096 .dst_reg = EBPF_REG_10,
2101 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2102 .dst_reg = EBPF_REG_1,
2103 .src_reg = EBPF_REG_10,
2106 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2107 .dst_reg = EBPF_REG_2,
2111 .code = (EBPF_ALU64 | BPF_SUB | BPF_X),
2112 .dst_reg = EBPF_REG_1,
2113 .src_reg = EBPF_REG_2,
2116 .code = (BPF_JMP | EBPF_CALL),
2120 .code = (BPF_LDX | BPF_MEM | BPF_B),
2121 .dst_reg = EBPF_REG_1,
2122 .src_reg = EBPF_REG_10,
2126 .code = (BPF_LDX | BPF_MEM | BPF_B),
2127 .dst_reg = EBPF_REG_2,
2128 .src_reg = EBPF_REG_10,
2132 .code = (BPF_LDX | BPF_MEM | BPF_B),
2133 .dst_reg = EBPF_REG_3,
2134 .src_reg = EBPF_REG_10,
2138 .code = (BPF_LDX | BPF_MEM | BPF_B),
2139 .dst_reg = EBPF_REG_4,
2140 .src_reg = EBPF_REG_10,
2144 .code = (BPF_JMP | EBPF_CALL),
2148 .code = (EBPF_ALU64 | BPF_XOR | BPF_K),
2149 .dst_reg = EBPF_REG_0,
2150 .imm = TEST_MEMFROB,
2153 .code = (BPF_JMP | EBPF_EXIT),
2157 /* Gathering the bytes together */
2159 dummy_func4_1(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
2161 return (a << 24) | (b << 16) | (c << 8) | (d << 0);
2164 /* Implementation of memfrob */
2166 dummy_func4_0(uint32_t *s, uint8_t n)
2168 char *p = (char *) s;
2176 test_call4_check(uint64_t rc, const void *arg)
2178 uint8_t a[4] = {1, 2, 3, 4};
2183 s = dummy_func4_0((uint32_t *)a, 4);
2185 s = dummy_func4_1(a[0], a[1], a[2], a[3]);
2187 v = s ^ TEST_MEMFROB;
2189 return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
2192 static const struct rte_bpf_xsym test_call4_xsym[] = {
2194 .name = RTE_STR(dummy_func4_0),
2195 .type = RTE_BPF_XTYPE_FUNC,
2197 .val = (void *)dummy_func4_0,
2201 .type = RTE_BPF_ARG_PTR,
2202 .size = 4 * sizeof(uint8_t),
2205 .type = RTE_BPF_ARG_RAW,
2206 .size = sizeof(uint8_t),
2210 .type = RTE_BPF_ARG_RAW,
2211 .size = sizeof(uint32_t),
2216 .name = RTE_STR(dummy_func4_1),
2217 .type = RTE_BPF_XTYPE_FUNC,
2219 .val = (void *)dummy_func4_1,
2223 .type = RTE_BPF_ARG_RAW,
2224 .size = sizeof(uint8_t),
2227 .type = RTE_BPF_ARG_RAW,
2228 .size = sizeof(uint8_t),
2231 .type = RTE_BPF_ARG_RAW,
2232 .size = sizeof(uint8_t),
2235 .type = RTE_BPF_ARG_RAW,
2236 .size = sizeof(uint8_t),
2240 .type = RTE_BPF_ARG_RAW,
2241 .size = sizeof(uint32_t),
2247 /* string compare test case */
2248 static const struct ebpf_insn test_call5_prog[] = {
2251 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2252 .dst_reg = EBPF_REG_1,
2256 .code = (BPF_STX | BPF_MEM | BPF_W),
2257 .dst_reg = EBPF_REG_10,
2258 .src_reg = EBPF_REG_1,
2262 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2263 .dst_reg = EBPF_REG_6,
2267 .code = (BPF_STX | BPF_MEM | BPF_B),
2268 .dst_reg = EBPF_REG_10,
2269 .src_reg = EBPF_REG_6,
2273 .code = (BPF_STX | BPF_MEM | BPF_W),
2274 .dst_reg = EBPF_REG_10,
2275 .src_reg = EBPF_REG_6,
2279 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2280 .dst_reg = EBPF_REG_1,
2284 .code = (BPF_STX | BPF_MEM | BPF_W),
2285 .dst_reg = EBPF_REG_10,
2286 .src_reg = EBPF_REG_1,
2290 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2291 .dst_reg = EBPF_REG_1,
2292 .src_reg = EBPF_REG_10,
2295 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2296 .dst_reg = EBPF_REG_1,
2300 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2301 .dst_reg = EBPF_REG_2,
2302 .src_reg = EBPF_REG_1,
2305 .code = (BPF_JMP | EBPF_CALL),
2309 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2310 .dst_reg = EBPF_REG_1,
2311 .src_reg = EBPF_REG_0,
2314 .code = (BPF_ALU | EBPF_MOV | BPF_K),
2315 .dst_reg = EBPF_REG_0,
2319 .code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2320 .dst_reg = EBPF_REG_1,
2324 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2325 .dst_reg = EBPF_REG_1,
2329 .code = (BPF_JMP | EBPF_JNE | BPF_K),
2330 .dst_reg = EBPF_REG_1,
2335 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2336 .dst_reg = EBPF_REG_1,
2337 .src_reg = EBPF_REG_10,
2340 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2341 .dst_reg = EBPF_REG_1,
2345 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2346 .dst_reg = EBPF_REG_2,
2347 .src_reg = EBPF_REG_10,
2350 .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
2351 .dst_reg = EBPF_REG_2,
2355 .code = (BPF_JMP | EBPF_CALL),
2359 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2360 .dst_reg = EBPF_REG_1,
2361 .src_reg = EBPF_REG_0,
2364 .code = (EBPF_ALU64 | BPF_LSH | BPF_K),
2365 .dst_reg = EBPF_REG_1,
2369 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2370 .dst_reg = EBPF_REG_1,
2374 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2375 .dst_reg = EBPF_REG_0,
2376 .src_reg = EBPF_REG_1,
2379 .code = (BPF_JMP | BPF_JEQ | BPF_X),
2380 .dst_reg = EBPF_REG_1,
2381 .src_reg = EBPF_REG_6,
2385 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
2386 .dst_reg = EBPF_REG_0,
2390 .code = (BPF_JMP | EBPF_EXIT),
2394 /* String comparison implementation, return 0 if equal else difference */
2396 dummy_func5(const char *s1, const char *s2)
2398 while (*s1 && (*s1 == *s2)) {
2402 return *(const unsigned char *)s1 - *(const unsigned char *)s2;
2406 test_call5_check(uint64_t rc, const void *arg)
2414 v = dummy_func5(a, a);
2420 v = dummy_func5(a, b);
2427 return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
2430 static const struct rte_bpf_xsym test_call5_xsym[] = {
2432 .name = RTE_STR(dummy_func5),
2433 .type = RTE_BPF_XTYPE_FUNC,
2435 .val = (void *)dummy_func5,
2439 .type = RTE_BPF_ARG_PTR,
2440 .size = sizeof(char),
2443 .type = RTE_BPF_ARG_PTR,
2444 .size = sizeof(char),
2448 .type = RTE_BPF_ARG_RAW,
2449 .size = sizeof(uint32_t),
2455 /* load mbuf (BPF_ABS/BPF_IND) test-cases */
2456 static const struct ebpf_insn test_ld_mbuf1_prog[] = {
2458 /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
2460 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2461 .dst_reg = EBPF_REG_6,
2462 .src_reg = EBPF_REG_1,
2464 /* load IPv4 version and IHL */
2466 .code = (BPF_LD | BPF_ABS | BPF_B),
2467 .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
2469 /* check IP version */
2471 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2472 .dst_reg = EBPF_REG_2,
2473 .src_reg = EBPF_REG_0,
2476 .code = (BPF_ALU | BPF_AND | BPF_K),
2477 .dst_reg = EBPF_REG_2,
2481 .code = (BPF_JMP | BPF_JEQ | BPF_K),
2482 .dst_reg = EBPF_REG_2,
2483 .imm = IPVERSION << 4,
2486 /* invalid IP version, return 0 */
2488 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
2489 .dst_reg = EBPF_REG_0,
2490 .src_reg = EBPF_REG_0,
2493 .code = (BPF_JMP | EBPF_EXIT),
2495 /* load 3-rd byte of IP data */
2497 .code = (BPF_ALU | BPF_AND | BPF_K),
2498 .dst_reg = EBPF_REG_0,
2499 .imm = RTE_IPV4_HDR_IHL_MASK,
2502 .code = (BPF_ALU | BPF_LSH | BPF_K),
2503 .dst_reg = EBPF_REG_0,
2507 .code = (BPF_LD | BPF_IND | BPF_B),
2508 .src_reg = EBPF_REG_0,
2512 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2513 .dst_reg = EBPF_REG_7,
2514 .src_reg = EBPF_REG_0,
2516 /* load IPv4 src addr */
2518 .code = (BPF_LD | BPF_ABS | BPF_W),
2519 .imm = offsetof(struct rte_ipv4_hdr, src_addr),
2522 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2523 .dst_reg = EBPF_REG_7,
2524 .src_reg = EBPF_REG_0,
2526 /* load IPv4 total length */
2528 .code = (BPF_LD | BPF_ABS | BPF_H),
2529 .imm = offsetof(struct rte_ipv4_hdr, total_length),
2532 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2533 .dst_reg = EBPF_REG_8,
2534 .src_reg = EBPF_REG_0,
2536 /* load last 4 bytes of IP data */
2538 .code = (BPF_LD | BPF_IND | BPF_W),
2539 .src_reg = EBPF_REG_8,
2540 .imm = -(int32_t)sizeof(uint32_t),
2543 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2544 .dst_reg = EBPF_REG_7,
2545 .src_reg = EBPF_REG_0,
2547 /* load 2 bytes from the middle of IP data */
2549 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2550 .dst_reg = EBPF_REG_8,
2554 .code = (BPF_LD | BPF_IND | BPF_H),
2555 .src_reg = EBPF_REG_8,
2558 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2559 .dst_reg = EBPF_REG_0,
2560 .src_reg = EBPF_REG_7,
2563 .code = (BPF_JMP | EBPF_EXIT),
2568 dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len,
2575 mb->buf_iova = (uintptr_t)buf;
2576 mb->buf_len = buf_len;
2577 rte_mbuf_refcnt_set(mb, 1);
2579 /* set pool pointer to dummy value, test doesn't use it */
2580 mb->pool = (void *)buf;
2582 rte_pktmbuf_reset(mb);
2583 db = (uint8_t *)rte_pktmbuf_append(mb, data_len);
2585 for (i = 0; i != data_len; i++)
2590 test_ld_mbuf1_prepare(void *arg)
2592 struct dummy_mbuf *dm;
2593 struct rte_ipv4_hdr *ph;
2595 const uint32_t plen = 400;
2596 const struct rte_ipv4_hdr iph = {
2597 .version_ihl = RTE_IPV4_VHL_DEF,
2598 .total_length = rte_cpu_to_be_16(plen),
2599 .time_to_live = IPDEFTTL,
2600 .next_proto_id = IPPROTO_RAW,
2601 .src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
2602 .dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
2606 memset(dm, 0, sizeof(*dm));
2608 dummy_mbuf_prep(&dm->mb[0], dm->buf[0], sizeof(dm->buf[0]),
2610 dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[0]),
2613 rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]);
2615 ph = rte_pktmbuf_mtod(dm->mb, typeof(ph));
2616 memcpy(ph, &iph, sizeof(iph));
2620 test_ld_mbuf1(const struct rte_mbuf *pkt)
2624 const uint16_t *p16;
2625 const uint32_t *p32;
2626 struct dummy_offset dof;
2628 /* load IPv4 version and IHL */
2629 p8 = rte_pktmbuf_read(pkt,
2630 offsetof(struct rte_ipv4_hdr, version_ihl), sizeof(*p8),
2635 /* check IP version */
2636 if ((p8[0] & 0xf0) != IPVERSION << 4)
2639 n = (p8[0] & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;
2641 /* load 3-rd byte of IP data */
2642 p8 = rte_pktmbuf_read(pkt, n + 3, sizeof(*p8), &dof);
2648 /* load IPv4 src addr */
2649 p32 = rte_pktmbuf_read(pkt,
2650 offsetof(struct rte_ipv4_hdr, src_addr), sizeof(*p32),
2655 v += rte_be_to_cpu_32(p32[0]);
2657 /* load IPv4 total length */
2658 p16 = rte_pktmbuf_read(pkt,
2659 offsetof(struct rte_ipv4_hdr, total_length), sizeof(*p16),
2664 n = rte_be_to_cpu_16(p16[0]);
2666 /* load last 4 bytes of IP data */
2667 p32 = rte_pktmbuf_read(pkt, n - sizeof(*p32), sizeof(*p32), &dof);
2671 v += rte_be_to_cpu_32(p32[0]);
2673 /* load 2 bytes from the middle of IP data */
2674 p16 = rte_pktmbuf_read(pkt, n / 2, sizeof(*p16), &dof);
2678 v += rte_be_to_cpu_16(p16[0]);
2683 test_ld_mbuf1_check(uint64_t rc, const void *arg)
2685 const struct dummy_mbuf *dm;
2689 v = test_ld_mbuf1(dm->mb);
2690 return cmp_res(__func__, v, rc, arg, arg, 0);
2694 * same as ld_mbuf1, but then truncate the mbuf by 1B,
2695 * so the load of the last 4B fails.
2698 test_ld_mbuf2_prepare(void *arg)
2700 struct dummy_mbuf *dm;
2702 test_ld_mbuf1_prepare(arg);
2704 rte_pktmbuf_trim(dm->mb, 1);
2708 test_ld_mbuf2_check(uint64_t rc, const void *arg)
2710 return cmp_res(__func__, 0, rc, arg, arg, 0);
2713 /* same as test_ld_mbuf1, but now store intermediate results on the stack */
2714 static const struct ebpf_insn test_ld_mbuf3_prog[] = {
2716 /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
2718 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2719 .dst_reg = EBPF_REG_6,
2720 .src_reg = EBPF_REG_1,
2722 /* load IPv4 version and IHL */
2724 .code = (BPF_LD | BPF_ABS | BPF_B),
2725 .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
2727 /* check IP version */
2729 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2730 .dst_reg = EBPF_REG_2,
2731 .src_reg = EBPF_REG_0,
2734 .code = (BPF_ALU | BPF_AND | BPF_K),
2735 .dst_reg = EBPF_REG_2,
2739 .code = (BPF_JMP | BPF_JEQ | BPF_K),
2740 .dst_reg = EBPF_REG_2,
2741 .imm = IPVERSION << 4,
2744 /* invalid IP version, return 0 */
2746 .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
2747 .dst_reg = EBPF_REG_0,
2748 .src_reg = EBPF_REG_0,
2751 .code = (BPF_JMP | EBPF_EXIT),
2753 /* load 3-rd byte of IP data */
2755 .code = (BPF_ALU | BPF_AND | BPF_K),
2756 .dst_reg = EBPF_REG_0,
2757 .imm = RTE_IPV4_HDR_IHL_MASK,
2760 .code = (BPF_ALU | BPF_LSH | BPF_K),
2761 .dst_reg = EBPF_REG_0,
2765 .code = (BPF_LD | BPF_IND | BPF_B),
2766 .src_reg = EBPF_REG_0,
2770 .code = (BPF_STX | BPF_MEM | BPF_B),
2771 .dst_reg = EBPF_REG_10,
2772 .src_reg = EBPF_REG_0,
2773 .off = (int16_t)(offsetof(struct dummy_offset, u8) -
2774 sizeof(struct dummy_offset)),
2776 /* load IPv4 src addr */
2778 .code = (BPF_LD | BPF_ABS | BPF_W),
2779 .imm = offsetof(struct rte_ipv4_hdr, src_addr),
2782 .code = (BPF_STX | BPF_MEM | BPF_W),
2783 .dst_reg = EBPF_REG_10,
2784 .src_reg = EBPF_REG_0,
2785 .off = (int16_t)(offsetof(struct dummy_offset, u32) -
2786 sizeof(struct dummy_offset)),
2788 /* load IPv4 total length */
2790 .code = (BPF_LD | BPF_ABS | BPF_H),
2791 .imm = offsetof(struct rte_ipv4_hdr, total_length),
2794 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
2795 .dst_reg = EBPF_REG_8,
2796 .src_reg = EBPF_REG_0,
2798 /* load last 4 bytes of IP data */
2800 .code = (BPF_LD | BPF_IND | BPF_W),
2801 .src_reg = EBPF_REG_8,
2802 .imm = -(int32_t)sizeof(uint32_t),
2805 .code = (BPF_STX | BPF_MEM | EBPF_DW),
2806 .dst_reg = EBPF_REG_10,
2807 .src_reg = EBPF_REG_0,
2808 .off = (int16_t)(offsetof(struct dummy_offset, u64) -
2809 sizeof(struct dummy_offset)),
2811 /* load 2 bytes from the middle of IP data */
2813 .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
2814 .dst_reg = EBPF_REG_8,
2818 .code = (BPF_LD | BPF_IND | BPF_H),
2819 .src_reg = EBPF_REG_8,
2822 .code = (BPF_LDX | BPF_MEM | EBPF_DW),
2823 .dst_reg = EBPF_REG_1,
2824 .src_reg = EBPF_REG_10,
2825 .off = (int16_t)(offsetof(struct dummy_offset, u64) -
2826 sizeof(struct dummy_offset)),
2829 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2830 .dst_reg = EBPF_REG_0,
2831 .src_reg = EBPF_REG_1,
2834 .code = (BPF_LDX | BPF_MEM | BPF_W),
2835 .dst_reg = EBPF_REG_1,
2836 .src_reg = EBPF_REG_10,
2837 .off = (int16_t)(offsetof(struct dummy_offset, u32) -
2838 sizeof(struct dummy_offset)),
2841 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2842 .dst_reg = EBPF_REG_0,
2843 .src_reg = EBPF_REG_1,
2846 .code = (BPF_LDX | BPF_MEM | BPF_B),
2847 .dst_reg = EBPF_REG_1,
2848 .src_reg = EBPF_REG_10,
2849 .off = (int16_t)(offsetof(struct dummy_offset, u8) -
2850 sizeof(struct dummy_offset)),
2853 .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
2854 .dst_reg = EBPF_REG_0,
2855 .src_reg = EBPF_REG_1,
2858 .code = (BPF_JMP | EBPF_EXIT),
2862 /* all bpf test cases */
2863 static const struct bpf_test tests[] = {
2865 .name = "test_store1",
2866 .arg_sz = sizeof(struct dummy_offset),
2868 .ins = test_store1_prog,
2869 .nb_ins = RTE_DIM(test_store1_prog),
2871 .type = RTE_BPF_ARG_PTR,
2872 .size = sizeof(struct dummy_offset),
2875 .prepare = test_store1_prepare,
2876 .check_result = test_store1_check,
2879 .name = "test_store2",
2880 .arg_sz = sizeof(struct dummy_offset),
2882 .ins = test_store2_prog,
2883 .nb_ins = RTE_DIM(test_store2_prog),
2885 .type = RTE_BPF_ARG_PTR,
2886 .size = sizeof(struct dummy_offset),
2889 .prepare = test_store1_prepare,
2890 .check_result = test_store1_check,
2893 .name = "test_load1",
2894 .arg_sz = sizeof(struct dummy_offset),
2896 .ins = test_load1_prog,
2897 .nb_ins = RTE_DIM(test_load1_prog),
2899 .type = RTE_BPF_ARG_PTR,
2900 .size = sizeof(struct dummy_offset),
2903 .prepare = test_load1_prepare,
2904 .check_result = test_load1_check,
2907 .name = "test_ldimm1",
2908 .arg_sz = sizeof(struct dummy_offset),
2910 .ins = test_ldimm1_prog,
2911 .nb_ins = RTE_DIM(test_ldimm1_prog),
2913 .type = RTE_BPF_ARG_PTR,
2914 .size = sizeof(struct dummy_offset),
2917 .prepare = test_store1_prepare,
2918 .check_result = test_ldimm1_check,
2921 .name = "test_mul1",
2922 .arg_sz = sizeof(struct dummy_vect8),
2924 .ins = test_mul1_prog,
2925 .nb_ins = RTE_DIM(test_mul1_prog),
2927 .type = RTE_BPF_ARG_PTR,
2928 .size = sizeof(struct dummy_vect8),
2931 .prepare = test_mul1_prepare,
2932 .check_result = test_mul1_check,
2935 .name = "test_shift1",
2936 .arg_sz = sizeof(struct dummy_vect8),
2938 .ins = test_shift1_prog,
2939 .nb_ins = RTE_DIM(test_shift1_prog),
2941 .type = RTE_BPF_ARG_PTR,
2942 .size = sizeof(struct dummy_vect8),
2945 .prepare = test_shift1_prepare,
2946 .check_result = test_shift1_check,
2949 .name = "test_jump1",
2950 .arg_sz = sizeof(struct dummy_vect8),
2952 .ins = test_jump1_prog,
2953 .nb_ins = RTE_DIM(test_jump1_prog),
2955 .type = RTE_BPF_ARG_PTR,
2956 .size = sizeof(struct dummy_vect8),
2959 .prepare = test_jump1_prepare,
2960 .check_result = test_jump1_check,
2963 .name = "test_jump2",
2964 .arg_sz = sizeof(struct dummy_net),
2966 .ins = test_jump2_prog,
2967 .nb_ins = RTE_DIM(test_jump2_prog),
2969 .type = RTE_BPF_ARG_PTR,
2970 .size = sizeof(struct dummy_net),
2973 .prepare = test_jump2_prepare,
2974 .check_result = test_jump2_check,
2977 .name = "test_alu1",
2978 .arg_sz = sizeof(struct dummy_vect8),
2980 .ins = test_alu1_prog,
2981 .nb_ins = RTE_DIM(test_alu1_prog),
2983 .type = RTE_BPF_ARG_PTR,
2984 .size = sizeof(struct dummy_vect8),
2987 .prepare = test_jump1_prepare,
2988 .check_result = test_alu1_check,
2991 .name = "test_bele1",
2992 .arg_sz = sizeof(struct dummy_vect8),
2994 .ins = test_bele1_prog,
2995 .nb_ins = RTE_DIM(test_bele1_prog),
2997 .type = RTE_BPF_ARG_PTR,
2998 .size = sizeof(struct dummy_vect8),
3001 .prepare = test_bele1_prepare,
3002 .check_result = test_bele1_check,
3005 .name = "test_xadd1",
3006 .arg_sz = sizeof(struct dummy_offset),
3008 .ins = test_xadd1_prog,
3009 .nb_ins = RTE_DIM(test_xadd1_prog),
3011 .type = RTE_BPF_ARG_PTR,
3012 .size = sizeof(struct dummy_offset),
3015 .prepare = test_store1_prepare,
3016 .check_result = test_xadd1_check,
3019 .name = "test_div1",
3020 .arg_sz = sizeof(struct dummy_vect8),
3022 .ins = test_div1_prog,
3023 .nb_ins = RTE_DIM(test_div1_prog),
3025 .type = RTE_BPF_ARG_PTR,
3026 .size = sizeof(struct dummy_vect8),
3029 .prepare = test_mul1_prepare,
3030 .check_result = test_div1_check,
3033 .name = "test_call1",
3034 .arg_sz = sizeof(struct dummy_offset),
3036 .ins = test_call1_prog,
3037 .nb_ins = RTE_DIM(test_call1_prog),
3039 .type = RTE_BPF_ARG_PTR,
3040 .size = sizeof(struct dummy_offset),
3042 .xsym = test_call1_xsym,
3043 .nb_xsym = RTE_DIM(test_call1_xsym),
3045 .prepare = test_load1_prepare,
3046 .check_result = test_call1_check,
3047 /* for now don't support function calls on 32 bit platform */
3048 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3051 .name = "test_call2",
3052 .arg_sz = sizeof(struct dummy_offset),
3054 .ins = test_call2_prog,
3055 .nb_ins = RTE_DIM(test_call2_prog),
3057 .type = RTE_BPF_ARG_PTR,
3058 .size = sizeof(struct dummy_offset),
3060 .xsym = test_call2_xsym,
3061 .nb_xsym = RTE_DIM(test_call2_xsym),
3063 .prepare = test_store1_prepare,
3064 .check_result = test_call2_check,
3065 /* for now don't support function calls on 32 bit platform */
3066 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3069 .name = "test_call3",
3070 .arg_sz = sizeof(struct dummy_vect8),
3072 .ins = test_call3_prog,
3073 .nb_ins = RTE_DIM(test_call3_prog),
3075 .type = RTE_BPF_ARG_PTR,
3076 .size = sizeof(struct dummy_vect8),
3078 .xsym = test_call3_xsym,
3079 .nb_xsym = RTE_DIM(test_call3_xsym),
3081 .prepare = test_call3_prepare,
3082 .check_result = test_call3_check,
3083 /* for now don't support function calls on 32 bit platform */
3084 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3087 .name = "test_call4",
3088 .arg_sz = sizeof(struct dummy_offset),
3090 .ins = test_call4_prog,
3091 .nb_ins = RTE_DIM(test_call4_prog),
3093 .type = RTE_BPF_ARG_PTR,
3094 .size = 2 * sizeof(struct dummy_offset),
3096 .xsym = test_call4_xsym,
3097 .nb_xsym = RTE_DIM(test_call4_xsym),
3099 .prepare = test_store1_prepare,
3100 .check_result = test_call4_check,
3101 /* for now don't support function calls on 32 bit platform */
3102 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3105 .name = "test_call5",
3106 .arg_sz = sizeof(struct dummy_offset),
3108 .ins = test_call5_prog,
3109 .nb_ins = RTE_DIM(test_call5_prog),
3111 .type = RTE_BPF_ARG_PTR,
3112 .size = sizeof(struct dummy_offset),
3114 .xsym = test_call5_xsym,
3115 .nb_xsym = RTE_DIM(test_call5_xsym),
3117 .prepare = test_store1_prepare,
3118 .check_result = test_call5_check,
3119 /* for now don't support function calls on 32 bit platform */
3120 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3123 .name = "test_ld_mbuf1",
3124 .arg_sz = sizeof(struct dummy_mbuf),
3126 .ins = test_ld_mbuf1_prog,
3127 .nb_ins = RTE_DIM(test_ld_mbuf1_prog),
3129 .type = RTE_BPF_ARG_PTR_MBUF,
3130 .buf_size = sizeof(struct dummy_mbuf),
3133 .prepare = test_ld_mbuf1_prepare,
3134 .check_result = test_ld_mbuf1_check,
3135 /* mbuf as input argument is not supported on 32 bit platform */
3136 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3139 .name = "test_ld_mbuf2",
3140 .arg_sz = sizeof(struct dummy_mbuf),
3142 .ins = test_ld_mbuf1_prog,
3143 .nb_ins = RTE_DIM(test_ld_mbuf1_prog),
3145 .type = RTE_BPF_ARG_PTR_MBUF,
3146 .buf_size = sizeof(struct dummy_mbuf),
3149 .prepare = test_ld_mbuf2_prepare,
3150 .check_result = test_ld_mbuf2_check,
3151 /* mbuf as input argument is not supported on 32 bit platform */
3152 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3155 .name = "test_ld_mbuf3",
3156 .arg_sz = sizeof(struct dummy_mbuf),
3158 .ins = test_ld_mbuf3_prog,
3159 .nb_ins = RTE_DIM(test_ld_mbuf3_prog),
3161 .type = RTE_BPF_ARG_PTR_MBUF,
3162 .buf_size = sizeof(struct dummy_mbuf),
3165 .prepare = test_ld_mbuf1_prepare,
3166 .check_result = test_ld_mbuf1_check,
3167 /* mbuf as input argument is not supported on 32 bit platform */
3168 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
3173 run_test(const struct bpf_test *tst)
3177 struct rte_bpf *bpf;
3178 struct rte_bpf_jit jit;
3179 uint8_t tbuf[tst->arg_sz];
3181 printf("%s(%s) start\n", __func__, tst->name);
3183 bpf = rte_bpf_load(&tst->prm);
3185 printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
3186 __func__, __LINE__, rte_errno, strerror(rte_errno));
3191 rc = rte_bpf_exec(bpf, tbuf);
3192 ret = tst->check_result(rc, tbuf);
3194 printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
3195 __func__, __LINE__, tst->name, ret, strerror(ret));
3198 /* repeat the same test with jit, when possible */
3199 rte_bpf_get_jit(bpf, &jit);
3200 if (jit.func != NULL) {
3203 rc = jit.func(tbuf);
3204 rv = tst->check_result(rc, tbuf);
3207 printf("%s@%d: check_result(%s) failed, "
3209 __func__, __LINE__, tst->name,
3214 rte_bpf_destroy(bpf);
3226 for (i = 0; i != RTE_DIM(tests); i++) {
3227 rv = run_test(tests + i);
3228 if (tests[i].allow_fail == 0)
3235 REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);