#include <rte_memory.h>
#include <rte_debug.h>
#include <rte_hexdump.h>
+#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
+#include "test.h"
+
+#if !defined(RTE_LIB_BPF)
+
+static int
+test_bpf(void)
+{
+ printf("BPF not supported, skipping test\n");
+ return TEST_SKIPPED;
+}
+
+#else
+
#include <rte_bpf.h>
#include <rte_ether.h>
#include <rte_ip.h>
-#include "test.h"
/*
* Basic functional tests for librte_bpf.
* The main procedure - load eBPF program, execute it and
- * compare restuls with expected values.
+ * compare results with expected values.
*/
struct dummy_offset {
struct rte_ipv4_hdr ip_hdr;
};
+#define DUMMY_MBUF_NUM 2
+
+/* first mbuf in the packet, should always be at offset 0 */
+struct dummy_mbuf {
+ struct rte_mbuf mb[DUMMY_MBUF_NUM];
+ uint8_t buf[DUMMY_MBUF_NUM][RTE_MBUF_DEFAULT_BUF_SIZE];
+};
+
#define TEST_FILL_1 0xDEADBEEF
#define TEST_MUL_1 21
#define TEST_SHIFT_1 15
#define TEST_SHIFT_2 33
+#define TEST_SHIFT32_MASK (CHAR_BIT * sizeof(uint32_t) - 1)
+#define TEST_SHIFT64_MASK (CHAR_BIT * sizeof(uint64_t) - 1)
+
#define TEST_JCC_1 0
#define TEST_JCC_2 -123
#define TEST_JCC_3 5678
.off = offsetof(struct dummy_vect8, out[1].u64),
},
{
- .code = (BPF_ALU | BPF_RSH | BPF_X),
- .dst_reg = EBPF_REG_2,
- .src_reg = EBPF_REG_4,
+ .code = (BPF_ALU | BPF_AND | BPF_K),
+ .dst_reg = EBPF_REG_4,
+ .imm = TEST_SHIFT64_MASK,
},
{
.code = (EBPF_ALU64 | BPF_LSH | BPF_X),
.dst_reg = EBPF_REG_3,
.src_reg = EBPF_REG_4,
},
+ {
+ .code = (BPF_ALU | BPF_AND | BPF_K),
+ .dst_reg = EBPF_REG_4,
+ .imm = TEST_SHIFT32_MASK,
+ },
+ {
+ .code = (BPF_ALU | BPF_RSH | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_4,
+ },
{
.code = (BPF_STX | BPF_MEM | EBPF_DW),
.dst_reg = EBPF_REG_1,
{
.code = (BPF_ALU | BPF_AND | BPF_K),
.dst_reg = EBPF_REG_2,
- .imm = sizeof(uint64_t) * CHAR_BIT - 1,
+ .imm = TEST_SHIFT64_MASK,
},
{
.code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
{
.code = (BPF_ALU | BPF_AND | BPF_K),
.dst_reg = EBPF_REG_2,
- .imm = sizeof(uint32_t) * CHAR_BIT - 1,
+ .imm = TEST_SHIFT32_MASK,
},
{
.code = (BPF_ALU | BPF_LSH | BPF_X),
dve.out[0].u64 = r2;
dve.out[1].u64 = r3;
- r2 = (uint32_t)r2 >> r4;
+ r4 &= TEST_SHIFT64_MASK;
r3 <<= r4;
+ r4 &= TEST_SHIFT32_MASK;
+ r2 = (uint32_t)r2 >> r4;
dve.out[2].u64 = r2;
dve.out[3].u64 = r3;
r3 = dvt->in[1].u64;
r4 = dvt->in[2].u32;
- r2 &= sizeof(uint64_t) * CHAR_BIT - 1;
+ r2 &= TEST_SHIFT64_MASK;
r3 = (int64_t)r3 >> r2;
- r2 &= sizeof(uint32_t) * CHAR_BIT - 1;
+ r2 &= TEST_SHIFT32_MASK;
r4 = (uint32_t)r4 << r2;
dve.out[4].u64 = r4;
* Initialize ether header.
*/
rte_ether_addr_copy((struct rte_ether_addr *)dst_mac,
- &dn->eth_hdr.d_addr);
+ &dn->eth_hdr.dst_addr);
rte_ether_addr_copy((struct rte_ether_addr *)src_mac,
- &dn->eth_hdr.s_addr);
+ &dn->eth_hdr.src_addr);
dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
/*
memset(&dfe, 0, sizeof(dfe));
rv = 1;
- rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
- rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+ __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
rv = -1;
- rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
- rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+ __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
rv = (int32_t)TEST_FILL_1;
- rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
- rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+ __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
rv = TEST_MUL_1;
- rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
- rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+ __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
rv = TEST_MUL_2;
- rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
- rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+ __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
rv = TEST_JCC_2;
- rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
- rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+ __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
rv = TEST_JCC_3;
- rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
- rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+ __atomic_fetch_add(&dfe.u32, rv, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&dfe.u64, rv, __ATOMIC_RELAXED);
return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
}
dummy_func1(arg, &v32, &v64);
v64 += v32;
- if (v64 != rc) {
- printf("%s@%d: invalid return value "
- "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
- __func__, __LINE__, v64, rc);
- return -1;
- }
- return 0;
return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv));
}
dummy_func2(&a, &b);
v = a.u64 + a.u32 + b.u16 + b.u8;
- if (v != rc) {
- printf("%s@%d: invalid return value "
- "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
- __func__, __LINE__, v, rc);
- return -1;
- }
- return 0;
+ return cmp_res(__func__, v, rc, arg, arg, 0);
}
static const struct rte_bpf_xsym test_call2_xsym[] = {
},
};
-/* String comparision impelementation, return 0 if equal else difference */
+/* String comparison implementation, return 0 if equal else difference */
static uint32_t
dummy_func5(const char *s1, const char *s2)
{
v = 0;
fail:
-
return cmp_res(__func__, v, rc, &v, &rc, sizeof(v));
}
},
};
+/* load mbuf (BPF_ABS/BPF_IND) test-cases */
+static const struct ebpf_insn test_ld_mbuf1_prog[] = {
+
+ /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_6,
+ .src_reg = EBPF_REG_1,
+ },
+ /* load IPv4 version and IHL */
+ {
+ .code = (BPF_LD | BPF_ABS | BPF_B),
+ .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
+ },
+ /* check IP version */
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_0,
+ },
+ {
+ .code = (BPF_ALU | BPF_AND | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = 0xf0,
+ },
+ {
+ .code = (BPF_JMP | BPF_JEQ | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = IPVERSION << 4,
+ .off = 2,
+ },
+ /* invalid IP version, return 0 */
+ {
+ .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_0,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+ /* load 3-rd byte of IP data */
+ {
+ .code = (BPF_ALU | BPF_AND | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = RTE_IPV4_HDR_IHL_MASK,
+ },
+ {
+ .code = (BPF_ALU | BPF_LSH | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 2,
+ },
+ {
+ .code = (BPF_LD | BPF_IND | BPF_B),
+ .src_reg = EBPF_REG_0,
+ .imm = 3,
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_7,
+ .src_reg = EBPF_REG_0,
+ },
+ /* load IPv4 src addr */
+ {
+ .code = (BPF_LD | BPF_ABS | BPF_W),
+ .imm = offsetof(struct rte_ipv4_hdr, src_addr),
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_7,
+ .src_reg = EBPF_REG_0,
+ },
+ /* load IPv4 total length */
+ {
+ .code = (BPF_LD | BPF_ABS | BPF_H),
+ .imm = offsetof(struct rte_ipv4_hdr, total_length),
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_8,
+ .src_reg = EBPF_REG_0,
+ },
+ /* load last 4 bytes of IP data */
+ {
+ .code = (BPF_LD | BPF_IND | BPF_W),
+ .src_reg = EBPF_REG_8,
+ .imm = -(int32_t)sizeof(uint32_t),
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_7,
+ .src_reg = EBPF_REG_0,
+ },
+ /* load 2 bytes from the middle of IP data */
+ {
+ .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
+ .dst_reg = EBPF_REG_8,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_LD | BPF_IND | BPF_H),
+ .src_reg = EBPF_REG_8,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_7,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+static void
+dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len,
+ uint32_t data_len)
+{
+ uint32_t i;
+ uint8_t *db;
+
+ mb->buf_addr = buf;
+ mb->buf_iova = (uintptr_t)buf;
+ mb->buf_len = buf_len;
+ rte_mbuf_refcnt_set(mb, 1);
+
+ /* set pool pointer to dummy value, test doesn't use it */
+ mb->pool = (void *)buf;
+
+ rte_pktmbuf_reset(mb);
+ db = (uint8_t *)rte_pktmbuf_append(mb, data_len);
+
+ for (i = 0; i != data_len; i++)
+ db[i] = i;
+}
+
+static void
+test_ld_mbuf1_prepare(void *arg)
+{
+ struct dummy_mbuf *dm;
+ struct rte_ipv4_hdr *ph;
+
+ const uint32_t plen = 400;
+ const struct rte_ipv4_hdr iph = {
+ .version_ihl = RTE_IPV4_VHL_DEF,
+ .total_length = rte_cpu_to_be_16(plen),
+ .time_to_live = IPDEFTTL,
+ .next_proto_id = IPPROTO_RAW,
+ .src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
+ .dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
+ };
+
+ dm = arg;
+ memset(dm, 0, sizeof(*dm));
+
+ dummy_mbuf_prep(&dm->mb[0], dm->buf[0], sizeof(dm->buf[0]),
+ plen / 2 + 1);
+ dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[0]),
+ plen / 2 - 1);
+
+ rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]);
+
+ ph = rte_pktmbuf_mtod(dm->mb, typeof(ph));
+ memcpy(ph, &iph, sizeof(iph));
+}
+
+static uint64_t
+test_ld_mbuf1(const struct rte_mbuf *pkt)
+{
+ uint64_t n, v;
+ const uint8_t *p8;
+ const uint16_t *p16;
+ const uint32_t *p32;
+ struct dummy_offset dof;
+
+ /* load IPv4 version and IHL */
+ p8 = rte_pktmbuf_read(pkt,
+ offsetof(struct rte_ipv4_hdr, version_ihl), sizeof(*p8),
+ &dof);
+ if (p8 == NULL)
+ return 0;
+
+ /* check IP version */
+ if ((p8[0] & 0xf0) != IPVERSION << 4)
+ return 0;
+
+ n = (p8[0] & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;
+
+ /* load 3-rd byte of IP data */
+ p8 = rte_pktmbuf_read(pkt, n + 3, sizeof(*p8), &dof);
+ if (p8 == NULL)
+ return 0;
+
+ v = p8[0];
+
+ /* load IPv4 src addr */
+ p32 = rte_pktmbuf_read(pkt,
+ offsetof(struct rte_ipv4_hdr, src_addr), sizeof(*p32),
+ &dof);
+ if (p32 == NULL)
+ return 0;
+
+ v += rte_be_to_cpu_32(p32[0]);
+
+ /* load IPv4 total length */
+ p16 = rte_pktmbuf_read(pkt,
+ offsetof(struct rte_ipv4_hdr, total_length), sizeof(*p16),
+ &dof);
+ if (p16 == NULL)
+ return 0;
+
+ n = rte_be_to_cpu_16(p16[0]);
+
+ /* load last 4 bytes of IP data */
+ p32 = rte_pktmbuf_read(pkt, n - sizeof(*p32), sizeof(*p32), &dof);
+ if (p32 == NULL)
+ return 0;
+
+ v += rte_be_to_cpu_32(p32[0]);
+
+ /* load 2 bytes from the middle of IP data */
+ p16 = rte_pktmbuf_read(pkt, n / 2, sizeof(*p16), &dof);
+ if (p16 == NULL)
+ return 0;
+
+ v += rte_be_to_cpu_16(p16[0]);
+ return v;
+}
+
+static int
+test_ld_mbuf1_check(uint64_t rc, const void *arg)
+{
+ const struct dummy_mbuf *dm;
+ uint64_t v;
+
+ dm = arg;
+ v = test_ld_mbuf1(dm->mb);
+ return cmp_res(__func__, v, rc, arg, arg, 0);
+}
+
+/*
+ * same as ld_mbuf1, but then truncate the mbuf by 1B,
+ * so that the load of the last 4B fails.
+ */
+static void
+test_ld_mbuf2_prepare(void *arg)
+{
+ struct dummy_mbuf *dm;
+
+ test_ld_mbuf1_prepare(arg);
+ dm = arg;
+ rte_pktmbuf_trim(dm->mb, 1);
+}
+
+static int
+test_ld_mbuf2_check(uint64_t rc, const void *arg)
+{
+ return cmp_res(__func__, 0, rc, arg, arg, 0);
+}
+
+/* same as test_ld_mbuf1, but now store intermediate results on the stack */
+static const struct ebpf_insn test_ld_mbuf3_prog[] = {
+
+ /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_6,
+ .src_reg = EBPF_REG_1,
+ },
+ /* load IPv4 version and IHL */
+ {
+ .code = (BPF_LD | BPF_ABS | BPF_B),
+ .imm = offsetof(struct rte_ipv4_hdr, version_ihl),
+ },
+ /* check IP version */
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_0,
+ },
+ {
+ .code = (BPF_ALU | BPF_AND | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = 0xf0,
+ },
+ {
+ .code = (BPF_JMP | BPF_JEQ | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = IPVERSION << 4,
+ .off = 2,
+ },
+ /* invalid IP version, return 0 */
+ {
+ .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_0,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+ /* load 3-rd byte of IP data */
+ {
+ .code = (BPF_ALU | BPF_AND | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = RTE_IPV4_HDR_IHL_MASK,
+ },
+ {
+ .code = (BPF_ALU | BPF_LSH | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 2,
+ },
+ {
+ .code = (BPF_LD | BPF_IND | BPF_B),
+ .src_reg = EBPF_REG_0,
+ .imm = 3,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | BPF_B),
+ .dst_reg = EBPF_REG_10,
+ .src_reg = EBPF_REG_0,
+ .off = (int16_t)(offsetof(struct dummy_offset, u8) -
+ sizeof(struct dummy_offset)),
+ },
+ /* load IPv4 src addr */
+ {
+ .code = (BPF_LD | BPF_ABS | BPF_W),
+ .imm = offsetof(struct rte_ipv4_hdr, src_addr),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_10,
+ .src_reg = EBPF_REG_0,
+ .off = (int16_t)(offsetof(struct dummy_offset, u32) -
+ sizeof(struct dummy_offset)),
+ },
+ /* load IPv4 total length */
+ {
+ .code = (BPF_LD | BPF_ABS | BPF_H),
+ .imm = offsetof(struct rte_ipv4_hdr, total_length),
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_8,
+ .src_reg = EBPF_REG_0,
+ },
+ /* load last 4 bytes of IP data */
+ {
+ .code = (BPF_LD | BPF_IND | BPF_W),
+ .src_reg = EBPF_REG_8,
+ .imm = -(int32_t)sizeof(uint32_t),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_10,
+ .src_reg = EBPF_REG_0,
+ .off = (int16_t)(offsetof(struct dummy_offset, u64) -
+ sizeof(struct dummy_offset)),
+ },
+ /* load 2 bytes from the middle of IP data */
+ {
+ .code = (EBPF_ALU64 | BPF_RSH | BPF_K),
+ .dst_reg = EBPF_REG_8,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_LD | BPF_IND | BPF_H),
+ .src_reg = EBPF_REG_8,
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_10,
+ .off = (int16_t)(offsetof(struct dummy_offset, u64) -
+ sizeof(struct dummy_offset)),
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_1,
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_10,
+ .off = (int16_t)(offsetof(struct dummy_offset, u32) -
+ sizeof(struct dummy_offset)),
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_1,
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_B),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_10,
+ .off = (int16_t)(offsetof(struct dummy_offset, u8) -
+ sizeof(struct dummy_offset)),
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+/* all bpf test cases */
static const struct bpf_test tests[] = {
{
.name = "test_store1",
/* for now don't support function calls on 32 bit platform */
.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
},
+ {
+ .name = "test_ld_mbuf1",
+ .arg_sz = sizeof(struct dummy_mbuf),
+ .prm = {
+ .ins = test_ld_mbuf1_prog,
+ .nb_ins = RTE_DIM(test_ld_mbuf1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR_MBUF,
+ .buf_size = sizeof(struct dummy_mbuf),
+ },
+ },
+ .prepare = test_ld_mbuf1_prepare,
+ .check_result = test_ld_mbuf1_check,
+ /* mbuf as input argument is not supported on 32 bit platform */
+ .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+ },
+ {
+ .name = "test_ld_mbuf2",
+ .arg_sz = sizeof(struct dummy_mbuf),
+ .prm = {
+ .ins = test_ld_mbuf1_prog,
+ .nb_ins = RTE_DIM(test_ld_mbuf1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR_MBUF,
+ .buf_size = sizeof(struct dummy_mbuf),
+ },
+ },
+ .prepare = test_ld_mbuf2_prepare,
+ .check_result = test_ld_mbuf2_check,
+ /* mbuf as input argument is not supported on 32 bit platform */
+ .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+ },
+ {
+ .name = "test_ld_mbuf3",
+ .arg_sz = sizeof(struct dummy_mbuf),
+ .prm = {
+ .ins = test_ld_mbuf3_prog,
+ .nb_ins = RTE_DIM(test_ld_mbuf3_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR_MBUF,
+ .buf_size = sizeof(struct dummy_mbuf),
+ },
+ },
+ .prepare = test_ld_mbuf1_prepare,
+ .check_result = test_ld_mbuf1_check,
+ /* mbuf as input argument is not supported on 32 bit platform */
+ .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+ },
};
static int
}
tst->prepare(tbuf);
-
rc = rte_bpf_exec(bpf, tbuf);
ret = tst->check_result(rc, tbuf);
if (ret != 0) {
__func__, __LINE__, tst->name, ret, strerror(ret));
}
+ /* repeat the same test with jit, when possible */
rte_bpf_get_jit(bpf, &jit);
- if (jit.func == NULL)
- return 0;
-
- tst->prepare(tbuf);
- rc = jit.func(tbuf);
- rv = tst->check_result(rc, tbuf);
- ret |= rv;
- if (rv != 0) {
- printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
- __func__, __LINE__, tst->name, rv, strerror(ret));
+ if (jit.func != NULL) {
+
+ tst->prepare(tbuf);
+ rc = jit.func(tbuf);
+ rv = tst->check_result(rc, tbuf);
+ ret |= rv;
+ if (rv != 0) {
+ printf("%s@%d: check_result(%s) failed, "
+ "error: %d(%s);\n",
+ __func__, __LINE__, tst->name,
+ rv, strerror(rv));
+ }
}
rte_bpf_destroy(bpf);
return rc;
}
+#endif /* !RTE_LIB_BPF */
+
REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);
+
+#ifndef RTE_HAS_LIBPCAP
+
+static int
+test_bpf_convert(void)
+{
+ printf("BPF convert test requires libpcap (RTE_HAS_LIBPCAP is undefined), skipping test\n");
+ return TEST_SKIPPED;
+}
+
+#else
+#include <pcap/pcap.h>
+
+static void
+test_bpf_dump(struct bpf_program *cbf, const struct rte_bpf_prm *prm)
+{
+ printf("cBPF program (%u insns)\n", cbf->bf_len);
+ bpf_dump(cbf, 1);
+
+ if (prm != NULL) {
+ printf("\neBPF program (%u insns)\n", prm->nb_ins);
+ rte_bpf_dump(stdout, prm->ins, prm->nb_ins);
+ }
+}
+
+static int
+test_bpf_match(pcap_t *pcap, const char *str,
+ struct rte_mbuf *mb)
+{
+ struct bpf_program fcode;
+ struct rte_bpf_prm *prm = NULL;
+ struct rte_bpf *bpf = NULL;
+ int ret = -1;
+ uint64_t rc;
+
+ if (pcap_compile(pcap, &fcode, str, 1, PCAP_NETMASK_UNKNOWN)) {
+ printf("%s@%d: pcap_compile(\"%s\") failed: %s;\n",
+ __func__, __LINE__, str, pcap_geterr(pcap));
+ return -1;
+ }
+
+ prm = rte_bpf_convert(&fcode);
+ if (prm == NULL) {
+ printf("%s@%d: bpf_convert('%s') failed, error=%d(%s);\n",
+ __func__, __LINE__, str, rte_errno, strerror(rte_errno));
+ goto error;
+ }
+
+ bpf = rte_bpf_load(prm);
+ if (bpf == NULL) {
+ printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
+ __func__, __LINE__, rte_errno, strerror(rte_errno));
+ goto error;
+ }
+
+ rc = rte_bpf_exec(bpf, mb);
+ /* The return code from bpf capture filter is non-zero if matched */
+ ret = (rc == 0);
+error:
+ if (bpf)
+ rte_bpf_destroy(bpf);
+ rte_free(prm);
+ pcap_freecode(&fcode);
+ return ret;
+}
+
+/* Basic sanity test can we match a IP packet */
+static int
+test_bpf_filter_sanity(pcap_t *pcap)
+{
+ const uint32_t plen = 100;
+ struct rte_mbuf mb, *m;
+ uint8_t tbuf[RTE_MBUF_DEFAULT_BUF_SIZE];
+ struct {
+ struct rte_ether_hdr eth_hdr;
+ struct rte_ipv4_hdr ip_hdr;
+ } *hdr;
+
+ dummy_mbuf_prep(&mb, tbuf, sizeof(tbuf), plen);
+ m = &mb;
+
+ hdr = rte_pktmbuf_mtod(m, typeof(hdr));
+ hdr->eth_hdr = (struct rte_ether_hdr) {
+ .dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4),
+ };
+ hdr->ip_hdr = (struct rte_ipv4_hdr) {
+ .version_ihl = RTE_IPV4_VHL_DEF,
+ .total_length = rte_cpu_to_be_16(plen),
+ .time_to_live = IPDEFTTL,
+ .next_proto_id = IPPROTO_RAW,
+ .src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
+ .dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
+ };
+
+ if (test_bpf_match(pcap, "ip", m) != 0) {
+ printf("%s@%d: filter \"ip\" doesn't match test data\n",
+ __func__, __LINE__);
+ return -1;
+ }
+ if (test_bpf_match(pcap, "not ip", m) == 0) {
+ printf("%s@%d: filter \"not ip\" does match test data\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Some sample pcap filter strings from
+ * https://wiki.wireshark.org/CaptureFilters
+ */
+static const char * const sample_filters[] = {
+ "host 172.18.5.4",
+ "net 192.168.0.0/24",
+ "src net 192.168.0.0/24",
+ "src net 192.168.0.0 mask 255.255.255.0",
+ "dst net 192.168.0.0/24",
+ "dst net 192.168.0.0 mask 255.255.255.0",
+ "port 53",
+ "host 192.0.2.1 and not (port 80 or port 25)",
+ "host 2001:4b98:db0::8 and not port 80 and not port 25",
+ "port not 53 and not arp",
+ "(tcp[0:2] > 1500 and tcp[0:2] < 1550) or (tcp[2:2] > 1500 and tcp[2:2] < 1550)",
+ "ether proto 0x888e",
+ "ether[0] & 1 = 0 and ip[16] >= 224",
+ "icmp[icmptype] != icmp-echo and icmp[icmptype] != icmp-echoreply",
+ "tcp[tcpflags] & (tcp-syn|tcp-fin) != 0 and not src and dst net 127.0.0.1",
+ "not ether dst 01:80:c2:00:00:0e",
+ "not broadcast and not multicast",
+ "dst host ff02::1",
+ "port 80 and tcp[((tcp[12:1] & 0xf0) >> 2):4] = 0x47455420",
+ /* Worms */
+ "dst port 135 and tcp port 135 and ip[2:2]==48",
+ "icmp[icmptype]==icmp-echo and ip[2:2]==92 and icmp[8:4]==0xAAAAAAAA",
+ "dst port 135 or dst port 445 or dst port 1433"
+ " and tcp[tcpflags] & (tcp-syn) != 0"
+ " and tcp[tcpflags] & (tcp-ack) = 0 and src net 192.168.0.0/24",
+ "tcp src port 443 and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4] = 0x18)"
+ " and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 1] = 0x03)"
+ " and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 2] < 0x04)"
+ " and ((ip[2:2] - 4 * (ip[0] & 0x0F) - 4 * ((tcp[12] & 0xF0) >> 4) > 69))",
+ /* Other */
+ "len = 128",
+};
+
+static int
+test_bpf_filter(pcap_t *pcap, const char *s)
+{
+ struct bpf_program fcode;
+ struct rte_bpf_prm *prm = NULL;
+ struct rte_bpf *bpf = NULL;
+
+ if (pcap_compile(pcap, &fcode, s, 1, PCAP_NETMASK_UNKNOWN)) {
+ printf("%s@%d: pcap_compile('%s') failed: %s;\n",
+ __func__, __LINE__, s, pcap_geterr(pcap));
+ return -1;
+ }
+
+ prm = rte_bpf_convert(&fcode);
+ if (prm == NULL) {
+ printf("%s@%d: bpf_convert('%s') failed, error=%d(%s);\n",
+ __func__, __LINE__, s, rte_errno, strerror(rte_errno));
+ goto error;
+ }
+
+ bpf = rte_bpf_load(prm);
+ if (bpf == NULL) {
+ printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
+ __func__, __LINE__, rte_errno, strerror(rte_errno));
+ goto error;
+ }
+
+error:
+ if (bpf)
+ rte_bpf_destroy(bpf);
+ else {
+ printf("%s \"%s\"\n", __func__, s);
+ test_bpf_dump(&fcode, prm);
+ }
+
+ rte_free(prm);
+ pcap_freecode(&fcode);
+ return (bpf == NULL) ? -1 : 0;
+}
+
+static int
+test_bpf_convert(void)
+{
+ unsigned int i;
+ pcap_t *pcap;
+ int rc;
+
+ pcap = pcap_open_dead(DLT_EN10MB, 262144);
+ if (!pcap) {
+ printf("pcap_open_dead failed\n");
+ return -1;
+ }
+
+ rc = test_bpf_filter_sanity(pcap);
+ for (i = 0; i < RTE_DIM(sample_filters); i++)
+ rc |= test_bpf_filter(pcap, sample_filters[i]);
+
+ pcap_close(pcap);
+ return rc;
+}
+
+#endif /* RTE_HAS_LIBPCAP */
+
+REGISTER_TEST_COMMAND(bpf_convert_autotest, test_bpf_convert);