diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index 1d50401aa8..7c3de96c62 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -14,6 +14,8 @@
 #include <rte_byteorder.h>
 #include <rte_errno.h>
 #include <rte_bpf.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
 
 #include "test.h"
 
@@ -35,6 +37,20 @@ struct dummy_vect8 {
 	struct dummy_offset out[8];
 };
 
+struct dummy_net {
+	struct rte_ether_hdr eth_hdr;
+	struct rte_vlan_hdr vlan_hdr;
+	struct rte_ipv4_hdr ip_hdr;
+};
+
+#define DUMMY_MBUF_NUM 2
+
+/* first mbuf in the packet, should always be at offset 0 */
+struct dummy_mbuf {
+	struct rte_mbuf mb[DUMMY_MBUF_NUM];
+	uint8_t buf[DUMMY_MBUF_NUM][RTE_MBUF_DEFAULT_BUF_SIZE];
+};
+
 #define TEST_FILL_1 0xDEADBEEF
 
 #define TEST_MUL_1 21
@@ -54,6 +70,20 @@ struct dummy_vect8 {
 #define TEST_IMM_4 ((uint64_t)UINT32_MAX)
 #define TEST_IMM_5 ((uint64_t)UINT32_MAX + 1)
 
+#define TEST_MEMFROB 0x2a2a2a2a
+
+#define STRING_GEEK 0x6B656567
+#define STRING_WEEK 0x6B656577
+
+#define TEST_NETMASK 0xffffff00
+#define TEST_SUBNET 0xaca80200
+
+uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
+uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
+
+uint32_t ip_src_addr = (172U << 24) | (168U << 16) | (2 << 8) | 1;
+uint32_t ip_dst_addr = (172U << 24) | (168U << 16) | (2 << 8) | 2;
+
 struct bpf_test {
 	const char *name;
 	size_t arg_sz;
@@ -866,6 +896,171 @@ test_jump1_check(uint64_t rc, const void *arg)
 	return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
 }
 
+/* Jump test case - check if ip4_dest is in a particular subnet */
+static const struct ebpf_insn test_jump2_prog[] = {
+
+	[0] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = 0xe,
+	},
+	[1] = {
+		.code = (BPF_LDX | BPF_MEM | BPF_H),
+		.dst_reg = EBPF_REG_3,
+		.src_reg = EBPF_REG_1,
+		.off = 12,
+	},
+	[2] = {
+		.code = (BPF_JMP | EBPF_JNE | BPF_K),
+		.dst_reg = EBPF_REG_3,
+		.off = 2,
+		.imm = 0x81,
+	},
+	[3] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = 0x12,
+	},
+	[4] = {
+		.code = (BPF_LDX | BPF_MEM | BPF_H),
+		.dst_reg = EBPF_REG_3,
+		.src_reg = EBPF_REG_1,
+		.off = 16,
+	},
+	[5] = {
+		.code = (EBPF_ALU64 | BPF_AND | BPF_K),
+		.dst_reg = EBPF_REG_3,
+		.imm = 0xffff,
+	},
+	[6] = {
+		.code = (BPF_JMP | EBPF_JNE | BPF_K),
+		.dst_reg = EBPF_REG_3,
+		.off = 9,
+		.imm = 0x8,
+	},
+	[7] = {
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_2,
+	},
+	[8] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 0,
+	},
+	[9] = {
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_1,
+		.off = 16,
+	},
+	[10] = {
+		.code = (BPF_ALU | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_3,
+		.imm = TEST_NETMASK,
+	},
+	[11] = {
+		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
+		.dst_reg = EBPF_REG_3,
+		.imm = sizeof(uint32_t) * CHAR_BIT,
+	},
+	[12] = {
+		.code = (BPF_ALU | BPF_AND | BPF_X),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_3,
+	},
+	[13] = {
+		.code = (BPF_ALU | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_3,
+		.imm = TEST_SUBNET,
+	},
+	[14] = {
+		.code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
+		.dst_reg = EBPF_REG_3,
+		.imm = sizeof(uint32_t) * CHAR_BIT,
+	},
+	[15] = {
+		.code = (BPF_JMP | BPF_JEQ | BPF_X),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_3,
+		.off = 1,
+	},
+	[16] = {
+		
.code = (EBPF_ALU64 | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = -1, + }, + [17] = { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +/* Preparing a vlan packet */ +static void +test_jump2_prepare(void *arg) +{ + struct dummy_net *dn; + + dn = arg; + memset(dn, 0, sizeof(*dn)); + + /* + * Initialize ether header. + */ + rte_ether_addr_copy((struct rte_ether_addr *)dst_mac, + &dn->eth_hdr.d_addr); + rte_ether_addr_copy((struct rte_ether_addr *)src_mac, + &dn->eth_hdr.s_addr); + dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); + + /* + * Initialize vlan header. + */ + dn->vlan_hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + dn->vlan_hdr.vlan_tci = 32; + + /* + * Initialize IP header. + */ + dn->ip_hdr.version_ihl = 0x45; /*IP_VERSION | IP_HDRLEN*/ + dn->ip_hdr.time_to_live = 64; /* IP_DEFTTL */ + dn->ip_hdr.next_proto_id = IPPROTO_TCP; + dn->ip_hdr.packet_id = rte_cpu_to_be_16(0x463c); + dn->ip_hdr.total_length = rte_cpu_to_be_16(60); + dn->ip_hdr.src_addr = rte_cpu_to_be_32(ip_src_addr); + dn->ip_hdr.dst_addr = rte_cpu_to_be_32(ip_dst_addr); +} + +static int +test_jump2_check(uint64_t rc, const void *arg) +{ + const struct rte_ether_hdr *eth_hdr = arg; + const struct rte_ipv4_hdr *ipv4_hdr; + const void *next = eth_hdr; + uint16_t eth_type; + uint64_t v = -1; + + if (eth_hdr->ether_type == htons(0x8100)) { + const struct rte_vlan_hdr *vlan_hdr = + (const void *)(eth_hdr + 1); + eth_type = vlan_hdr->eth_proto; + next = vlan_hdr + 1; + } else { + eth_type = eth_hdr->ether_type; + next = eth_hdr + 1; + } + + if (eth_type == htons(0x0800)) { + ipv4_hdr = next; + if ((ipv4_hdr->dst_addr & rte_cpu_to_be_32(TEST_NETMASK)) == + rte_cpu_to_be_32(TEST_SUBNET)) { + v = 0; + } + } + + return cmp_res(__func__, v, rc, arg, arg, sizeof(arg)); +} + /* alu (add, sub, and, or, xor, neg) test-cases */ static const struct ebpf_insn test_alu1_prog[] = { @@ -1610,13 +1805,6 @@ test_call1_check(uint64_t rc, const void *arg) dummy_func1(arg, &v32, &v64); v64 += v32; - if (v64 != rc) { - printf("%s@%d: invalid return value " - "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n", - __func__, __LINE__, v64, rc); - return -1; - } - return 0; return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv)); } @@ -1747,13 +1935,7 @@ test_call2_check(uint64_t rc, const void *arg) dummy_func2(&a, &b); v = a.u64 + a.u32 + b.u16 + b.u8; - if (v != rc) { - printf("%s@%d: invalid return value " - "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n", - __func__, __LINE__, v, rc); - return -1; - } - return 0; + return cmp_res(__func__, v, rc, arg, arg, 0); } static const struct rte_bpf_xsym test_call2_xsym[] = { @@ -1777,136 +1959,1051 @@ static const struct rte_bpf_xsym test_call2_xsym[] = { }, }; -static const struct bpf_test tests[] = { +static const struct ebpf_insn test_call3_prog[] = { + { - .name = "test_store1", - .arg_sz = sizeof(struct dummy_offset), - .prm = { - .ins = test_store1_prog, - .nb_ins = RTE_DIM(test_store1_prog), - .prog_arg = { - .type = RTE_BPF_ARG_PTR, - .size = sizeof(struct dummy_offset), - }, - }, - .prepare = test_store1_prepare, - .check_result = test_store1_check, + .code = (BPF_JMP | EBPF_CALL), + .imm = 0, }, { - .name = "test_store2", - .arg_sz = sizeof(struct dummy_offset), - .prm = { - .ins = test_store2_prog, - .nb_ins = RTE_DIM(test_store2_prog), - .prog_arg = { - .type = RTE_BPF_ARG_PTR, - .size = sizeof(struct dummy_offset), - }, - }, - .prepare = test_store1_prepare, - .check_result = test_store1_check, + .code = (BPF_LDX | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_2, + 
.src_reg = EBPF_REG_0, + .off = offsetof(struct dummy_offset, u8), }, { - .name = "test_load1", - .arg_sz = sizeof(struct dummy_offset), - .prm = { - .ins = test_load1_prog, - .nb_ins = RTE_DIM(test_load1_prog), - .prog_arg = { - .type = RTE_BPF_ARG_PTR, - .size = sizeof(struct dummy_offset), - }, - }, - .prepare = test_load1_prepare, - .check_result = test_load1_check, + .code = (BPF_LDX | BPF_MEM | BPF_H), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_0, + .off = offsetof(struct dummy_offset, u16), }, { - .name = "test_ldimm1", - .arg_sz = sizeof(struct dummy_offset), - .prm = { - .ins = test_ldimm1_prog, - .nb_ins = RTE_DIM(test_ldimm1_prog), - .prog_arg = { - .type = RTE_BPF_ARG_PTR, - .size = sizeof(struct dummy_offset), - }, - }, - .prepare = test_store1_prepare, - .check_result = test_ldimm1_check, + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_0, + .off = offsetof(struct dummy_offset, u32), }, { - .name = "test_mul1", - .arg_sz = sizeof(struct dummy_vect8), - .prm = { - .ins = test_mul1_prog, - .nb_ins = RTE_DIM(test_mul1_prog), - .prog_arg = { - .type = RTE_BPF_ARG_PTR, - .size = sizeof(struct dummy_vect8), - }, - }, - .prepare = test_mul1_prepare, - .check_result = test_mul1_check, + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_0, + .off = offsetof(struct dummy_offset, u64), }, + /* return sum */ { - .name = "test_shift1", - .arg_sz = sizeof(struct dummy_vect8), - .prm = { - .ins = test_shift1_prog, - .nb_ins = RTE_DIM(test_shift1_prog), - .prog_arg = { - .type = RTE_BPF_ARG_PTR, - .size = sizeof(struct dummy_vect8), - }, - }, - .prepare = test_shift1_prepare, - .check_result = test_shift1_check, + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_4, }, { - .name = "test_jump1", - .arg_sz = sizeof(struct dummy_vect8), - .prm = { - .ins = test_jump1_prog, - .nb_ins = RTE_DIM(test_jump1_prog), - .prog_arg = { - .type = RTE_BPF_ARG_PTR, - .size = sizeof(struct dummy_vect8), - }, - }, - .prepare = test_jump1_prepare, - .check_result = test_jump1_check, + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_3, }, { - .name = "test_alu1", - .arg_sz = sizeof(struct dummy_vect8), - .prm = { - .ins = test_alu1_prog, - .nb_ins = RTE_DIM(test_alu1_prog), - .prog_arg = { - .type = RTE_BPF_ARG_PTR, - .size = sizeof(struct dummy_vect8), - }, - }, - .prepare = test_jump1_prepare, - .check_result = test_alu1_check, + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_2, }, { - .name = "test_bele1", - .arg_sz = sizeof(struct dummy_vect8), - .prm = { - .ins = test_bele1_prog, - .nb_ins = RTE_DIM(test_bele1_prog), - .prog_arg = { - .type = RTE_BPF_ARG_PTR, - .size = sizeof(struct dummy_vect8), - }, - }, - .prepare = test_bele1_prepare, - .check_result = test_bele1_check, + .code = (BPF_JMP | EBPF_EXIT), }, - { - .name = "test_xadd1", - .arg_sz = sizeof(struct dummy_offset), +}; + +static const struct dummy_offset * +dummy_func3(const struct dummy_vect8 *p) +{ + return &p->in[RTE_DIM(p->in) - 1]; +} + +static void +test_call3_prepare(void *arg) +{ + struct dummy_vect8 *pv; + struct dummy_offset *df; + + pv = arg; + df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv); + + memset(pv, 0, sizeof(*pv)); + df->u64 = (int32_t)TEST_FILL_1; + df->u32 = df->u64; + df->u16 = df->u64; + df->u8 = df->u64; +} + +static int +test_call3_check(uint64_t rc, const void *arg) +{ + uint64_t v; + const struct dummy_vect8 *pv; + const 
struct dummy_offset *dft; + + pv = arg; + dft = dummy_func3(pv); + + v = dft->u64; + v += dft->u32; + v += dft->u16; + v += dft->u8; + + return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv)); +} + +static const struct rte_bpf_xsym test_call3_xsym[] = { + { + .name = RTE_STR(dummy_func3), + .type = RTE_BPF_XTYPE_FUNC, + .func = { + .val = (void *)dummy_func3, + .nb_args = 1, + .args = { + [0] = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_vect8), + }, + }, + .ret = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_offset), + }, + }, + }, +}; + +/* Test for stack corruption in multiple function calls */ +static const struct ebpf_insn test_call4_prog[] = { + { + .code = (BPF_ST | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_10, + .off = -4, + .imm = 1, + }, + { + .code = (BPF_ST | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_10, + .off = -3, + .imm = 2, + }, + { + .code = (BPF_ST | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_10, + .off = -2, + .imm = 3, + }, + { + .code = (BPF_ST | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_10, + .off = -1, + .imm = 4, + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_10, + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = 4, + }, + { + .code = (EBPF_ALU64 | BPF_SUB | BPF_X), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_2, + }, + { + .code = (BPF_JMP | EBPF_CALL), + .imm = 0, + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_10, + .off = -4, + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_10, + .off = -3, + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_3, + .src_reg = EBPF_REG_10, + .off = -2, + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_4, + .src_reg = EBPF_REG_10, + .off = -1, + }, + { + .code = (BPF_JMP | EBPF_CALL), + .imm = 1, + }, + { + .code = (EBPF_ALU64 | BPF_XOR | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = TEST_MEMFROB, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +/* Gathering the bytes together */ +static uint32_t +dummy_func4_1(uint8_t a, uint8_t b, uint8_t c, uint8_t d) +{ + return (a << 24) | (b << 16) | (c << 8) | (d << 0); +} + +/* Implementation of memfrob */ +static uint32_t +dummy_func4_0(uint32_t *s, uint8_t n) +{ + char *p = (char *) s; + while (n-- > 0) + *p++ ^= 42; + return *s; +} + + +static int +test_call4_check(uint64_t rc, const void *arg) +{ + uint8_t a[4] = {1, 2, 3, 4}; + uint32_t s, v = 0; + + RTE_SET_USED(arg); + + s = dummy_func4_0((uint32_t *)a, 4); + + s = dummy_func4_1(a[0], a[1], a[2], a[3]); + + v = s ^ TEST_MEMFROB; + + return cmp_res(__func__, v, rc, &v, &rc, sizeof(v)); +} + +static const struct rte_bpf_xsym test_call4_xsym[] = { + [0] = { + .name = RTE_STR(dummy_func4_0), + .type = RTE_BPF_XTYPE_FUNC, + .func = { + .val = (void *)dummy_func4_0, + .nb_args = 2, + .args = { + [0] = { + .type = RTE_BPF_ARG_PTR, + .size = 4 * sizeof(uint8_t), + }, + [1] = { + .type = RTE_BPF_ARG_RAW, + .size = sizeof(uint8_t), + }, + }, + .ret = { + .type = RTE_BPF_ARG_RAW, + .size = sizeof(uint32_t), + }, + }, + }, + [1] = { + .name = RTE_STR(dummy_func4_1), + .type = RTE_BPF_XTYPE_FUNC, + .func = { + .val = (void *)dummy_func4_1, + .nb_args = 4, + .args = { + [0] = { + .type = RTE_BPF_ARG_RAW, + .size = sizeof(uint8_t), + }, + [1] = { + .type = RTE_BPF_ARG_RAW, + .size = sizeof(uint8_t), + }, + [2] = { + .type = RTE_BPF_ARG_RAW, + .size = sizeof(uint8_t), + }, + [3] = { + .type = RTE_BPF_ARG_RAW, + 
.size = sizeof(uint8_t),
+				},
+			},
+			.ret = {
+				.type = RTE_BPF_ARG_RAW,
+				.size = sizeof(uint32_t),
+			},
+		},
+	},
+};
+
+/* string compare test case */
+static const struct ebpf_insn test_call5_prog[] = {
+
+	[0] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_1,
+		.imm = STRING_GEEK,
+	},
+	[1] = {
+		.code = (BPF_STX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_10,
+		.src_reg = EBPF_REG_1,
+		.off = -8,
+	},
+	[2] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_6,
+		.imm = 0,
+	},
+	[3] = {
+		.code = (BPF_STX | BPF_MEM | BPF_B),
+		.dst_reg = EBPF_REG_10,
+		.src_reg = EBPF_REG_6,
+		.off = -4,
+	},
+	[4] = {
+		.code = (BPF_STX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_10,
+		.src_reg = EBPF_REG_6,
+		.off = -12,
+	},
+	[5] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_1,
+		.imm = STRING_WEEK,
+	},
+	[6] = {
+		.code = (BPF_STX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_10,
+		.src_reg = EBPF_REG_1,
+		.off = -16,
+	},
+	[7] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_10,
+	},
+	[8] = {
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
+		.dst_reg = EBPF_REG_1,
+		.imm = -8,
+	},
+	[9] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_1,
+	},
+	[10] = {
+		.code = (BPF_JMP | EBPF_CALL),
+		.imm = 0,
+	},
+	[11] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_0,
+	},
+	[12] = {
+		.code = (BPF_ALU | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = -1,
+	},
+	[13] = {
+		.code = (EBPF_ALU64 | BPF_LSH | BPF_K),
+		.dst_reg = EBPF_REG_1,
+		.imm = 0x20,
+	},
+	[14] = {
+		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
+		.dst_reg = EBPF_REG_1,
+		.imm = 0x20,
+	},
+	[15] = {
+		.code = (BPF_JMP | EBPF_JNE | BPF_K),
+		.dst_reg = EBPF_REG_1,
+		.off = 11,
+		.imm = 0,
+	},
+	[16] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_10,
+	},
+	[17] = {
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
+		.dst_reg = EBPF_REG_1,
+		.imm = -8,
+	},
+	[18] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_10,
+	},
+	[19] = {
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = -16,
+	},
+	[20] = {
+		.code = (BPF_JMP | EBPF_CALL),
+		.imm = 0,
+	},
+	[21] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_0,
+	},
+	[22] = {
+		.code = (EBPF_ALU64 | BPF_LSH | BPF_K),
+		.dst_reg = EBPF_REG_1,
+		.imm = 0x20,
+	},
+	[23] = {
+		.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
+		.dst_reg = EBPF_REG_1,
+		.imm = 0x20,
+	},
+	[24] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_1,
+	},
+	[25] = {
+		.code = (BPF_JMP | BPF_JEQ | BPF_X),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_6,
+		.off = 1,
+	},
+	[26] = {
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+		.dst_reg = EBPF_REG_0,
+		.imm = 0,
+	},
+	[27] = {
+		.code = (BPF_JMP | EBPF_EXIT),
+	},
+};
+
+/* String comparison implementation, return 0 if equal, otherwise the difference */
+static uint32_t
+dummy_func5(const char *s1, const char *s2)
+{
+	while (*s1 && (*s1 == *s2)) {
+		s1++;
+		s2++;
+	}
+	return *(const unsigned char *)s1 - *(const unsigned char *)s2;
+}
+
+static int
+test_call5_check(uint64_t rc, const void *arg)
+{
+	char a[] = "geek";
+	char b[] = "week";
+	uint32_t v;
+
+	RTE_SET_USED(arg);
+
+	v = dummy_func5(a, a);
+	if (v != 0) {
+		v = -1;
+		goto fail;
+	}
+
+	v = dummy_func5(a, b);
+	if (v == 0)
+		goto fail;
+
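+	/* both host-side reference checks passed, report success */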
+ v = 0; + +fail: + return cmp_res(__func__, v, rc, &v, &rc, sizeof(v)); +} + +static const struct rte_bpf_xsym test_call5_xsym[] = { + [0] = { + .name = RTE_STR(dummy_func5), + .type = RTE_BPF_XTYPE_FUNC, + .func = { + .val = (void *)dummy_func5, + .nb_args = 2, + .args = { + [0] = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(char), + }, + [1] = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(char), + }, + }, + .ret = { + .type = RTE_BPF_ARG_RAW, + .size = sizeof(uint32_t), + }, + }, + }, +}; + +/* load mbuf (BPF_ABS/BPF_IND) test-cases */ +static const struct ebpf_insn test_ld_mbuf1_prog[] = { + + /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */ + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), + .dst_reg = EBPF_REG_6, + .src_reg = EBPF_REG_1, + }, + /* load IPv4 version and IHL */ + { + .code = (BPF_LD | BPF_ABS | BPF_B), + .imm = offsetof(struct rte_ipv4_hdr, version_ihl), + }, + /* check IP version */ + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_0, + }, + { + .code = (BPF_ALU | BPF_AND | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = 0xf0, + }, + { + .code = (BPF_JMP | BPF_JEQ | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = IPVERSION << 4, + .off = 2, + }, + /* invalid IP version, return 0 */ + { + .code = (EBPF_ALU64 | BPF_XOR | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_0, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, + /* load 3-rd byte of IP data */ + { + .code = (BPF_ALU | BPF_AND | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = RTE_IPV4_HDR_IHL_MASK, + }, + { + .code = (BPF_ALU | BPF_LSH | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 2, + }, + { + .code = (BPF_LD | BPF_IND | BPF_B), + .src_reg = EBPF_REG_0, + .imm = 3, + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), + .dst_reg = EBPF_REG_7, + .src_reg = EBPF_REG_0, + }, + /* load IPv4 src addr */ + { + .code = (BPF_LD | BPF_ABS | BPF_W), + .imm = offsetof(struct rte_ipv4_hdr, src_addr), + }, + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_7, + .src_reg = EBPF_REG_0, + }, + /* load IPv4 total length */ + { + .code = (BPF_LD | BPF_ABS | BPF_H), + .imm = offsetof(struct rte_ipv4_hdr, total_length), + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), + .dst_reg = EBPF_REG_8, + .src_reg = EBPF_REG_0, + }, + /* load last 4 bytes of IP data */ + { + .code = (BPF_LD | BPF_IND | BPF_W), + .src_reg = EBPF_REG_8, + .imm = -(int32_t)sizeof(uint32_t), + }, + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_7, + .src_reg = EBPF_REG_0, + }, + /* load 2 bytes from the middle of IP data */ + { + .code = (EBPF_ALU64 | BPF_RSH | BPF_K), + .dst_reg = EBPF_REG_8, + .imm = 1, + }, + { + .code = (BPF_LD | BPF_IND | BPF_H), + .src_reg = EBPF_REG_8, + }, + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_7, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +static void +dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len, + uint32_t data_len) +{ + uint32_t i; + uint8_t *db; + + mb->buf_addr = buf; + mb->buf_iova = (uintptr_t)buf; + mb->buf_len = buf_len; + rte_mbuf_refcnt_set(mb, 1); + + /* set pool pointer to dummy value, test doesn't use it */ + mb->pool = (void *)buf; + + rte_pktmbuf_reset(mb); + db = (uint8_t *)rte_pktmbuf_append(mb, data_len); + + for (i = 0; i != data_len; i++) + db[i] = i; +} + +static void +test_ld_mbuf1_prepare(void *arg) +{ + struct dummy_mbuf *dm; + struct rte_ipv4_hdr *ph; + + const uint32_t plen = 400; + const struct rte_ipv4_hdr iph = { + .version_ihl = RTE_IPV4_VHL_DEF, + 
.total_length = rte_cpu_to_be_16(plen),
+		.time_to_live = IPDEFTTL,
+		.next_proto_id = IPPROTO_RAW,
+		.src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
+		.dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
+	};
+
+	dm = arg;
+	memset(dm, 0, sizeof(*dm));
+
+	dummy_mbuf_prep(&dm->mb[0], dm->buf[0], sizeof(dm->buf[0]),
+		plen / 2 + 1);
+	dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[0]),
+		plen / 2 - 1);
+
+	rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]);
+
+	ph = rte_pktmbuf_mtod(dm->mb, typeof(ph));
+	memcpy(ph, &iph, sizeof(iph));
+}
+
+static uint64_t
+test_ld_mbuf1(const struct rte_mbuf *pkt)
+{
+	uint64_t n, v;
+	const uint8_t *p8;
+	const uint16_t *p16;
+	const uint32_t *p32;
+	struct dummy_offset dof;
+
+	/* load IPv4 version and IHL */
+	p8 = rte_pktmbuf_read(pkt,
+		offsetof(struct rte_ipv4_hdr, version_ihl), sizeof(*p8),
+		&dof);
+	if (p8 == NULL)
+		return 0;
+
+	/* check IP version */
+	if ((p8[0] & 0xf0) != IPVERSION << 4)
+		return 0;
+
+	n = (p8[0] & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;
+
+	/* load 3-rd byte of IP data */
+	p8 = rte_pktmbuf_read(pkt, n + 3, sizeof(*p8), &dof);
+	if (p8 == NULL)
+		return 0;
+
+	v = p8[0];
+
+	/* load IPv4 src addr */
+	p32 = rte_pktmbuf_read(pkt,
+		offsetof(struct rte_ipv4_hdr, src_addr), sizeof(*p32),
+		&dof);
+	if (p32 == NULL)
+		return 0;
+
+	v += rte_be_to_cpu_32(p32[0]);
+
+	/* load IPv4 total length */
+	p16 = rte_pktmbuf_read(pkt,
+		offsetof(struct rte_ipv4_hdr, total_length), sizeof(*p16),
+		&dof);
+	if (p16 == NULL)
+		return 0;
+
+	n = rte_be_to_cpu_16(p16[0]);
+
+	/* load last 4 bytes of IP data */
+	p32 = rte_pktmbuf_read(pkt, n - sizeof(*p32), sizeof(*p32), &dof);
+	if (p32 == NULL)
+		return 0;
+
+	v += rte_be_to_cpu_32(p32[0]);
+
+	/* load 2 bytes from the middle of IP data */
+	p16 = rte_pktmbuf_read(pkt, n / 2, sizeof(*p16), &dof);
+	if (p16 == NULL)
+		return 0;
+
+	v += rte_be_to_cpu_16(p16[0]);
+	return v;
+}
+
+static int
+test_ld_mbuf1_check(uint64_t rc, const void *arg)
+{
+	const struct dummy_mbuf *dm;
+	uint64_t v;
+
+	dm = arg;
+	v = test_ld_mbuf1(dm->mb);
+	return cmp_res(__func__, v, rc, arg, arg, 0);
+}
+
+/*
+ * same as ld_mbuf1, but then truncate the mbuf by 1B,
+ * so the load of the last 4B fails,
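+ * and the check then expects the program to return 0.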
+ */ +static void +test_ld_mbuf2_prepare(void *arg) +{ + struct dummy_mbuf *dm; + + test_ld_mbuf1_prepare(arg); + dm = arg; + rte_pktmbuf_trim(dm->mb, 1); +} + +static int +test_ld_mbuf2_check(uint64_t rc, const void *arg) +{ + return cmp_res(__func__, 0, rc, arg, arg, 0); +} + +/* same as test_ld_mbuf1, but now store intermediate results on the stack */ +static const struct ebpf_insn test_ld_mbuf3_prog[] = { + + /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */ + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), + .dst_reg = EBPF_REG_6, + .src_reg = EBPF_REG_1, + }, + /* load IPv4 version and IHL */ + { + .code = (BPF_LD | BPF_ABS | BPF_B), + .imm = offsetof(struct rte_ipv4_hdr, version_ihl), + }, + /* check IP version */ + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), + .dst_reg = EBPF_REG_2, + .src_reg = EBPF_REG_0, + }, + { + .code = (BPF_ALU | BPF_AND | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = 0xf0, + }, + { + .code = (BPF_JMP | BPF_JEQ | BPF_K), + .dst_reg = EBPF_REG_2, + .imm = IPVERSION << 4, + .off = 2, + }, + /* invalid IP version, return 0 */ + { + .code = (EBPF_ALU64 | BPF_XOR | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_0, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, + /* load 3-rd byte of IP data */ + { + .code = (BPF_ALU | BPF_AND | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = RTE_IPV4_HDR_IHL_MASK, + }, + { + .code = (BPF_ALU | BPF_LSH | BPF_K), + .dst_reg = EBPF_REG_0, + .imm = 2, + }, + { + .code = (BPF_LD | BPF_IND | BPF_B), + .src_reg = EBPF_REG_0, + .imm = 3, + }, + { + .code = (BPF_STX | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_10, + .src_reg = EBPF_REG_0, + .off = (int16_t)(offsetof(struct dummy_offset, u8) - + sizeof(struct dummy_offset)), + }, + /* load IPv4 src addr */ + { + .code = (BPF_LD | BPF_ABS | BPF_W), + .imm = offsetof(struct rte_ipv4_hdr, src_addr), + }, + { + .code = (BPF_STX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_10, + .src_reg = EBPF_REG_0, + .off = (int16_t)(offsetof(struct dummy_offset, u32) - + sizeof(struct dummy_offset)), + }, + /* load IPv4 total length */ + { + .code = (BPF_LD | BPF_ABS | BPF_H), + .imm = offsetof(struct rte_ipv4_hdr, total_length), + }, + { + .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), + .dst_reg = EBPF_REG_8, + .src_reg = EBPF_REG_0, + }, + /* load last 4 bytes of IP data */ + { + .code = (BPF_LD | BPF_IND | BPF_W), + .src_reg = EBPF_REG_8, + .imm = -(int32_t)sizeof(uint32_t), + }, + { + .code = (BPF_STX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_10, + .src_reg = EBPF_REG_0, + .off = (int16_t)(offsetof(struct dummy_offset, u64) - + sizeof(struct dummy_offset)), + }, + /* load 2 bytes from the middle of IP data */ + { + .code = (EBPF_ALU64 | BPF_RSH | BPF_K), + .dst_reg = EBPF_REG_8, + .imm = 1, + }, + { + .code = (BPF_LD | BPF_IND | BPF_H), + .src_reg = EBPF_REG_8, + }, + { + .code = (BPF_LDX | BPF_MEM | EBPF_DW), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_10, + .off = (int16_t)(offsetof(struct dummy_offset, u64) - + sizeof(struct dummy_offset)), + }, + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_1, + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_W), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_10, + .off = (int16_t)(offsetof(struct dummy_offset, u32) - + sizeof(struct dummy_offset)), + }, + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_1, + }, + { + .code = (BPF_LDX | BPF_MEM | BPF_B), + .dst_reg = EBPF_REG_1, + .src_reg = EBPF_REG_10, + .off = (int16_t)(offsetof(struct dummy_offset, u8) - + sizeof(struct dummy_offset)), 
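+		/* i.e. a negative offset from the frame pointer (R10) */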
+ }, + { + .code = (EBPF_ALU64 | BPF_ADD | BPF_X), + .dst_reg = EBPF_REG_0, + .src_reg = EBPF_REG_1, + }, + { + .code = (BPF_JMP | EBPF_EXIT), + }, +}; + +/* all bpf test cases */ +static const struct bpf_test tests[] = { + { + .name = "test_store1", + .arg_sz = sizeof(struct dummy_offset), + .prm = { + .ins = test_store1_prog, + .nb_ins = RTE_DIM(test_store1_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_offset), + }, + }, + .prepare = test_store1_prepare, + .check_result = test_store1_check, + }, + { + .name = "test_store2", + .arg_sz = sizeof(struct dummy_offset), + .prm = { + .ins = test_store2_prog, + .nb_ins = RTE_DIM(test_store2_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_offset), + }, + }, + .prepare = test_store1_prepare, + .check_result = test_store1_check, + }, + { + .name = "test_load1", + .arg_sz = sizeof(struct dummy_offset), + .prm = { + .ins = test_load1_prog, + .nb_ins = RTE_DIM(test_load1_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_offset), + }, + }, + .prepare = test_load1_prepare, + .check_result = test_load1_check, + }, + { + .name = "test_ldimm1", + .arg_sz = sizeof(struct dummy_offset), + .prm = { + .ins = test_ldimm1_prog, + .nb_ins = RTE_DIM(test_ldimm1_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_offset), + }, + }, + .prepare = test_store1_prepare, + .check_result = test_ldimm1_check, + }, + { + .name = "test_mul1", + .arg_sz = sizeof(struct dummy_vect8), + .prm = { + .ins = test_mul1_prog, + .nb_ins = RTE_DIM(test_mul1_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_vect8), + }, + }, + .prepare = test_mul1_prepare, + .check_result = test_mul1_check, + }, + { + .name = "test_shift1", + .arg_sz = sizeof(struct dummy_vect8), + .prm = { + .ins = test_shift1_prog, + .nb_ins = RTE_DIM(test_shift1_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_vect8), + }, + }, + .prepare = test_shift1_prepare, + .check_result = test_shift1_check, + }, + { + .name = "test_jump1", + .arg_sz = sizeof(struct dummy_vect8), + .prm = { + .ins = test_jump1_prog, + .nb_ins = RTE_DIM(test_jump1_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_vect8), + }, + }, + .prepare = test_jump1_prepare, + .check_result = test_jump1_check, + }, + { + .name = "test_jump2", + .arg_sz = sizeof(struct dummy_net), + .prm = { + .ins = test_jump2_prog, + .nb_ins = RTE_DIM(test_jump2_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_net), + }, + }, + .prepare = test_jump2_prepare, + .check_result = test_jump2_check, + }, + { + .name = "test_alu1", + .arg_sz = sizeof(struct dummy_vect8), + .prm = { + .ins = test_alu1_prog, + .nb_ins = RTE_DIM(test_alu1_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_vect8), + }, + }, + .prepare = test_jump1_prepare, + .check_result = test_alu1_check, + }, + { + .name = "test_bele1", + .arg_sz = sizeof(struct dummy_vect8), + .prm = { + .ins = test_bele1_prog, + .nb_ins = RTE_DIM(test_bele1_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_vect8), + }, + }, + .prepare = test_bele1_prepare, + .check_result = test_bele1_check, + }, + { + .name = "test_xadd1", + .arg_sz = sizeof(struct dummy_offset), .prm = { .ins = test_xadd1_prog, .nb_ins = RTE_DIM(test_xadd1_prog), @@ -1968,6 +3065,108 @@ static const struct bpf_test tests[] = { /* for now don't support function 
calls on 32 bit platform */ .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), }, + { + .name = "test_call3", + .arg_sz = sizeof(struct dummy_vect8), + .prm = { + .ins = test_call3_prog, + .nb_ins = RTE_DIM(test_call3_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_vect8), + }, + .xsym = test_call3_xsym, + .nb_xsym = RTE_DIM(test_call3_xsym), + }, + .prepare = test_call3_prepare, + .check_result = test_call3_check, + /* for now don't support function calls on 32 bit platform */ + .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), + }, + { + .name = "test_call4", + .arg_sz = sizeof(struct dummy_offset), + .prm = { + .ins = test_call4_prog, + .nb_ins = RTE_DIM(test_call4_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = 2 * sizeof(struct dummy_offset), + }, + .xsym = test_call4_xsym, + .nb_xsym = RTE_DIM(test_call4_xsym), + }, + .prepare = test_store1_prepare, + .check_result = test_call4_check, + /* for now don't support function calls on 32 bit platform */ + .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), + }, + { + .name = "test_call5", + .arg_sz = sizeof(struct dummy_offset), + .prm = { + .ins = test_call5_prog, + .nb_ins = RTE_DIM(test_call5_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(struct dummy_offset), + }, + .xsym = test_call5_xsym, + .nb_xsym = RTE_DIM(test_call5_xsym), + }, + .prepare = test_store1_prepare, + .check_result = test_call5_check, + /* for now don't support function calls on 32 bit platform */ + .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), + }, + { + .name = "test_ld_mbuf1", + .arg_sz = sizeof(struct dummy_mbuf), + .prm = { + .ins = test_ld_mbuf1_prog, + .nb_ins = RTE_DIM(test_ld_mbuf1_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR_MBUF, + .buf_size = sizeof(struct dummy_mbuf), + }, + }, + .prepare = test_ld_mbuf1_prepare, + .check_result = test_ld_mbuf1_check, + /* mbuf as input argument is not supported on 32 bit platform */ + .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), + }, + { + .name = "test_ld_mbuf2", + .arg_sz = sizeof(struct dummy_mbuf), + .prm = { + .ins = test_ld_mbuf1_prog, + .nb_ins = RTE_DIM(test_ld_mbuf1_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR_MBUF, + .buf_size = sizeof(struct dummy_mbuf), + }, + }, + .prepare = test_ld_mbuf2_prepare, + .check_result = test_ld_mbuf2_check, + /* mbuf as input argument is not supported on 32 bit platform */ + .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), + }, + { + .name = "test_ld_mbuf3", + .arg_sz = sizeof(struct dummy_mbuf), + .prm = { + .ins = test_ld_mbuf3_prog, + .nb_ins = RTE_DIM(test_ld_mbuf3_prog), + .prog_arg = { + .type = RTE_BPF_ARG_PTR_MBUF, + .buf_size = sizeof(struct dummy_mbuf), + }, + }, + .prepare = test_ld_mbuf1_prepare, + .check_result = test_ld_mbuf1_check, + /* mbuf as input argument is not supported on 32 bit platform */ + .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), + }, }; static int @@ -1989,7 +3188,6 @@ run_test(const struct bpf_test *tst) } tst->prepare(tbuf); - rc = rte_bpf_exec(bpf, tbuf); ret = tst->check_result(rc, tbuf); if (ret != 0) { @@ -1997,17 +3195,20 @@ run_test(const struct bpf_test *tst) __func__, __LINE__, tst->name, ret, strerror(ret)); } + /* repeat the same test with jit, when possible */ rte_bpf_get_jit(bpf, &jit); - if (jit.func == NULL) - return 0; - - tst->prepare(tbuf); - rc = jit.func(tbuf); - rv = tst->check_result(rc, tbuf); - ret |= rv; - if (rv != 0) { - printf("%s@%d: check_result(%s) failed, error: %d(%s);\n", - __func__, __LINE__, 
tst->name, rv, strerror(ret)); + if (jit.func != NULL) { + + tst->prepare(tbuf); + rc = jit.func(tbuf); + rv = tst->check_result(rc, tbuf); + ret |= rv; + if (rv != 0) { + printf("%s@%d: check_result(%s) failed, " + "error: %d(%s);\n", + __func__, __LINE__, tst->name, + rv, strerror(ret)); + } } rte_bpf_destroy(bpf);
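
For context, the interpreter-versus-JIT round trip that run_test() performs above reduces to the minimal sketch below. This is a hypothetical helper, not part of the patch; prm and buf stand for a populated struct rte_bpf_prm and a prepared argument buffer.

#include <rte_bpf.h>
#include <rte_errno.h>

/* sketch: run one eBPF program via the interpreter and, when available,
 * via JIT-generated native code, and report whether the two paths agree
 */
static int
run_both_ways(const struct rte_bpf_prm *prm, void *buf)
{
	struct rte_bpf *bpf;
	struct rte_bpf_jit jit;
	uint64_t r1, r2;

	bpf = rte_bpf_load(prm);	/* verify and load the program */
	if (bpf == NULL)
		return -rte_errno;

	r1 = rte_bpf_exec(bpf, buf);	/* interpreted run */

	r2 = r1;
	if (rte_bpf_get_jit(bpf, &jit) == 0 && jit.func != NULL)
		r2 = jit.func(buf);	/* native (JIT) run */

	rte_bpf_destroy(bpf);
	return r1 == r2 ? 0 : -1;
}

Note that run_test() itself also re-runs tst->prepare() before the JIT pass, since a program may modify its argument buffer.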