#include <rte_memory.h>
#include <rte_debug.h>
#include <rte_hexdump.h>
+#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
}
REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);
+
+#ifdef RTE_PORT_PCAP
+#include <pcap/pcap.h>
+
+static void
+test_bpf_dump(struct bpf_program *cbf, const struct rte_bpf_prm *prm)
+{
+ printf("cBPF program (%u insns)\n", cbf->bf_len);
+ bpf_dump(cbf, 1);
+
+ printf("\neBPF program (%u insns)\n", prm->nb_ins);
+ rte_bpf_dump(stdout, prm->ins, prm->nb_ins);
+}
+
+static int
+test_bpf_match(pcap_t *pcap, const char *str,
+ struct rte_mbuf *mb)
+{
+ struct bpf_program fcode;
+ struct rte_bpf_prm *prm = NULL;
+ struct rte_bpf *bpf = NULL;
+ int ret = -1;
+ uint64_t rc;
+
+ if (pcap_compile(pcap, &fcode, str, 1, PCAP_NETMASK_UNKNOWN)) {
+ printf("%s@%d: pcap_compile(\"%s\") failed: %s;\n",
+ __func__, __LINE__, str, pcap_geterr(pcap));
+ return -1;
+ }
+
+ prm = rte_bpf_convert(&fcode);
+ if (prm == NULL) {
+ printf("%s@%d: bpf_convert('%s') failed, error=%d(%s);\n",
+ __func__, __LINE__, str, rte_errno, strerror(rte_errno));
+ goto error;
+ }
+
+ bpf = rte_bpf_load(prm);
+ if (bpf == NULL) {
+ printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
+ __func__, __LINE__, rte_errno, strerror(rte_errno));
+ goto error;
+ }
+
+ rc = rte_bpf_exec(bpf, mb);
+ /* The return code from bpf capture filter is non-zero if matched */
+ ret = (rc == 0);
+error:
+ if (bpf)
+ rte_bpf_destroy(bpf);
+ rte_free(prm);
+ pcap_freecode(&fcode);
+ return ret;
+}
+
+/* Basic sanity test: can we match an IP packet? */
+static int
+test_bpf_filter_sanity(pcap_t *pcap)
+{
+ const uint32_t plen = 100;
+ struct rte_mbuf mb, *m;
+ uint8_t tbuf[RTE_MBUF_DEFAULT_BUF_SIZE];
+ struct {
+ struct rte_ether_hdr eth_hdr;
+ struct rte_ipv4_hdr ip_hdr;
+ } *hdr;
+
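+ /* Build a broadcast IPv4 packet in a dummy mbuf for the filters below */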
+ dummy_mbuf_prep(&mb, tbuf, sizeof(tbuf), plen);
+ m = &mb;
+
+ hdr = rte_pktmbuf_mtod(m, typeof(hdr));
+ hdr->eth_hdr = (struct rte_ether_hdr) {
+ .dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4),
+ };
+ hdr->ip_hdr = (struct rte_ipv4_hdr) {
+ .version_ihl = RTE_IPV4_VHL_DEF,
+ .total_length = rte_cpu_to_be_16(plen),
+ .time_to_live = IPDEFTTL,
+ .next_proto_id = IPPROTO_RAW,
+ .src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
+ .dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
+ };
+
+ if (test_bpf_match(pcap, "ip", m) != 0) {
+ printf("%s@%d: filter \"ip\" doesn't match test data\n",
+ __func__, __LINE__);
+ return -1;
+ }
+ if (test_bpf_match(pcap, "not ip", m) == 0) {
+ printf("%s@%d: filter \"not ip\" does match test data\n",
+ __func__, __LINE__);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Some sample pcap filter strings from
+ * https://wiki.wireshark.org/CaptureFilters
+ */
+static const char * const sample_filters[] = {
+ "host 172.18.5.4",
+ "net 192.168.0.0/24",
+ "src net 192.168.0.0/24",
+ "src net 192.168.0.0 mask 255.255.255.0",
+ "dst net 192.168.0.0/24",
+ "dst net 192.168.0.0 mask 255.255.255.0",
+ "port 53",
+ "host dpdk.org and not (port 80 or port 25)",
+ "host dpdk.org and not port 80 and not port 25",
+ "port not 53 and not arp",
+ "(tcp[0:2] > 1500 and tcp[0:2] < 1550) or (tcp[2:2] > 1500 and tcp[2:2] < 1550)",
+ "ether proto 0x888e",
+ "ether[0] & 1 = 0 and ip[16] >= 224",
+ "icmp[icmptype] != icmp-echo and icmp[icmptype] != icmp-echoreply",
+ "tcp[tcpflags] & (tcp-syn|tcp-fin) != 0 and not src and dst net 127.0.0.1",
+ "not ether dst 01:80:c2:00:00:0e",
+ "not broadcast and not multicast",
+ "dst host ff02::1",
+ "port 80 and tcp[((tcp[12:1] & 0xf0) >> 2):4] = 0x47455420",
+ /* Worms */
+ "dst port 135 and tcp port 135 and ip[2:2]==48",
+ "icmp[icmptype]==icmp-echo and ip[2:2]==92 and icmp[8:4]==0xAAAAAAAA",
+ "dst port 135 or dst port 445 or dst port 1433"
+ " and tcp[tcpflags] & (tcp-syn) != 0"
+ " and tcp[tcpflags] & (tcp-ack) = 0 and src net 192.168.0.0/24",
+ "tcp src port 443 and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4] = 0x18)"
+ " and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 1] = 0x03)"
+ " and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 2] < 0x04)"
+ " and ((ip[2:2] - 4 * (ip[0] & 0x0F) - 4 * ((tcp[12] & 0xF0) >> 4) > 69))",
+ /* Other */
+ "len = 128",
+};
+
+static int
+test_bpf_filter(pcap_t *pcap, const char *s)
+{
+ struct bpf_program fcode;
+ struct rte_bpf_prm *prm = NULL;
+ struct rte_bpf *bpf = NULL;
+
+ if (pcap_compile(pcap, &fcode, s, 1, PCAP_NETMASK_UNKNOWN)) {
+ printf("%s@%d: pcap_compile('%s') failed: %s;\n",
+ __func__, __LINE__, s, pcap_geterr(pcap));
+ return -1;
+ }
+
+ prm = rte_bpf_convert(&fcode);
+ if (prm == NULL) {
+ printf("%s@%d: bpf_convert('%s') failed, error=%d(%s);\n",
+ __func__, __LINE__, s, rte_errno, strerror(rte_errno));
+ goto error;
+ }
+
+ bpf = rte_bpf_load(prm);
+ if (bpf == NULL) {
+ printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
+ __func__, __LINE__, rte_errno, strerror(rte_errno));
+ goto error;
+ }
+
+error:
+ if (bpf)
+ rte_bpf_destroy(bpf);
+ else {
+ printf("%s \"%s\"\n", __func__, s);
+ if (prm != NULL)
+ test_bpf_dump(&fcode, prm);
+ }
+
+ rte_free(prm);
+ pcap_freecode(&fcode);
+ return (bpf == NULL) ? -1 : 0;
+}
+
+static int
+test_bpf_convert(void)
+{
+ unsigned int i;
+ pcap_t *pcap;
+ int rc;
+
+ pcap = pcap_open_dead(DLT_EN10MB, 262144);
+ if (!pcap) {
+ printf("pcap_open_dead failed\n");
+ return -1;
+ }
+
+ rc = test_bpf_filter_sanity(pcap);
+ for (i = 0; i < RTE_DIM(sample_filters); i++)
+ rc |= test_bpf_filter(pcap, sample_filters[i]);
+
+ pcap_close(pcap);
+ return rc;
+}
+
+REGISTER_TEST_COMMAND(bpf_convert_autotest, test_bpf_convert);
+#endif /* RTE_PORT_PCAP */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Microsoft Corporation
+ *
+ * Based on bpf_convert_filter() in the Linux kernel sources
+ * and filter2xdp.
+ *
+ * Licensed as BSD with permission of the original authors.
+ * Copyright (C) 2017 Tobias Klauser
+ * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_bpf.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+
+/* Workaround name conflicts with libpcap */
+#define bpf_validate(f, len) bpf_validate_libpcap(f, len)
+#include <pcap/pcap.h>
+#include <pcap/bpf.h>
+#undef bpf_validate
+
+#include "bpf_impl.h"
+#include "bpf_def.h"
+
+#ifndef BPF_MAXINSNS
+#define BPF_MAXINSNS 4096
+#endif
+
+/*
+ * Linux socket filter uses negative absolute offsets to
+ * reference ancillary data.
+ */
+#define SKF_AD_OFF (-0x1000)
+#define SKF_AD_PROTOCOL 0
+#define SKF_AD_PKTTYPE 4
+#define SKF_AD_IFINDEX 8
+#define SKF_AD_NLATTR 12
+#define SKF_AD_NLATTR_NEST 16
+#define SKF_AD_MARK 20
+#define SKF_AD_QUEUE 24
+#define SKF_AD_HATYPE 28
+#define SKF_AD_RXHASH 32
+#define SKF_AD_CPU 36
+#define SKF_AD_ALU_XOR_X 40
+#define SKF_AD_VLAN_TAG 44
+#define SKF_AD_VLAN_TAG_PRESENT 48
+#define SKF_AD_PAY_OFFSET 52
+#define SKF_AD_RANDOM 56
+#define SKF_AD_VLAN_TPID 60
+#define SKF_AD_MAX 64
+
+/* ArgX, context and stack frame pointer register positions. Note that
+ * Arg1, Arg2, Arg3, etc. are used as argument mappings for function
+ * calls in the BPF_CALL instruction.
+ */
+#define BPF_REG_ARG1 EBPF_REG_1
+#define BPF_REG_ARG2 EBPF_REG_2
+#define BPF_REG_ARG3 EBPF_REG_3
+#define BPF_REG_ARG4 EBPF_REG_4
+#define BPF_REG_ARG5 EBPF_REG_5
+#define BPF_REG_CTX EBPF_REG_6
+#define BPF_REG_FP EBPF_REG_10
+
+/* Additional register mappings for converted user programs. */
+#define BPF_REG_A EBPF_REG_0
+#define BPF_REG_X EBPF_REG_7
+#define BPF_REG_TMP EBPF_REG_8
+
+/* Helper macros for filter block array initializers. */
+
+/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
+
+#define EBPF_ALU64_REG(OP, DST, SRC) \
+ ((struct ebpf_insn) { \
+ .code = EBPF_ALU64 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+#define BPF_ALU32_REG(OP, DST, SRC) \
+ ((struct ebpf_insn) { \
+ .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
+
+#define BPF_ALU32_IMM(OP, DST, IMM) \
+ ((struct ebpf_insn) { \
+ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Short form of mov, dst_reg = src_reg */
+
+#define BPF_MOV64_REG(DST, SRC) \
+ ((struct ebpf_insn) { \
+ .code = EBPF_ALU64 | EBPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+#define BPF_MOV32_REG(DST, SRC) \
+ ((struct ebpf_insn) { \
+ .code = BPF_ALU | EBPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+/* Short form of mov, dst_reg = imm32 */
+
+#define BPF_MOV32_IMM(DST, IMM) \
+ ((struct ebpf_insn) { \
+ .code = BPF_ALU | EBPF_MOV | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
+
+#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
+ ((struct ebpf_insn) { \
+ .code = BPF_ALU | EBPF_MOV | BPF_SRC(TYPE), \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
+
+#define BPF_LD_ABS(SIZE, IMM) \
+ ((struct ebpf_insn) { \
+ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
+
+#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
+ ((struct ebpf_insn) { \
+ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
+
+#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
+ ((struct ebpf_insn) { \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
+
+#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
+ ((struct ebpf_insn) { \
+ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Raw code statement block */
+
+#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
+ ((struct ebpf_insn) { \
+ .code = CODE, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Program exit */
+
+#define BPF_EXIT_INSN() \
+ ((struct ebpf_insn) { \
+ .code = BPF_JMP | EBPF_EXIT, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
+/*
+ * Placeholder to convert BPF extensions like length and VLAN tag,
+ * if and when DPDK BPF supports them.
+ */
+static bool convert_bpf_load(const struct bpf_insn *fp,
+ struct ebpf_insn **new_insnp __rte_unused)
+{
+ switch (fp->k) {
+ case SKF_AD_OFF + SKF_AD_PROTOCOL:
+ case SKF_AD_OFF + SKF_AD_PKTTYPE:
+ case SKF_AD_OFF + SKF_AD_IFINDEX:
+ case SKF_AD_OFF + SKF_AD_HATYPE:
+ case SKF_AD_OFF + SKF_AD_MARK:
+ case SKF_AD_OFF + SKF_AD_RXHASH:
+ case SKF_AD_OFF + SKF_AD_QUEUE:
+ case SKF_AD_OFF + SKF_AD_VLAN_TAG:
+ case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
+ case SKF_AD_OFF + SKF_AD_VLAN_TPID:
+ case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
+ case SKF_AD_OFF + SKF_AD_NLATTR:
+ case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
+ case SKF_AD_OFF + SKF_AD_CPU:
+ case SKF_AD_OFF + SKF_AD_RANDOM:
+ case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
+ /* Linux has special negative offsets to access meta-data. */
+ RTE_BPF_LOG(ERR,
+ "rte_bpf_convert: socket offset %d not supported\n",
+ fp->k - SKF_AD_OFF);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int bpf_convert_filter(const struct bpf_insn *prog, size_t len,
+ struct ebpf_insn *new_prog, uint32_t *new_len)
+{
+ unsigned int pass = 0;
+ size_t new_flen = 0, target, i;
+ struct ebpf_insn *new_insn;
+ const struct bpf_insn *fp;
+ int *addrs = NULL;
+ uint8_t bpf_src;
+
+ if (len > BPF_MAXINSNS) {
+ RTE_BPF_LOG(ERR, "%s: cBPF program too long (%zu insns)\n",
+ __func__, len);
+ return -EINVAL;
+ }
+
+ /* On the second (emit) call, allocate the table that maps each cBPF
+ * instruction index to its eBPF offset, used to translate jump targets.
+ */
+ if (new_prog) {
+ addrs = calloc(len, sizeof(*addrs));
+ if (addrs == NULL)
+ return -ENOMEM;
+ }
+
+do_pass:
+ new_insn = new_prog;
+ fp = prog;
+
+ /* Classic BPF related prologue emission. */
+ if (new_insn) {
+ /* Classic BPF expects A and X to be reset first. These need
+ * to be guaranteed to be the first two instructions.
+ */
+ *new_insn++ = EBPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
+ *new_insn++ = EBPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);
+
+ /* All programs must keep CTX in callee saved BPF_REG_CTX.
+ * In the eBPF case this is done by the compiler; here we need to
+ * do it ourselves. Initial CTX is present in BPF_REG_ARG1.
+ */
+ *new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
+ } else {
+ new_insn += 3;
+ }
+
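+ /*
+ * Convert each cBPF instruction into one or more eBPF instructions;
+ * tmp_insns buffers the expansion (at most 6 insns) before it is
+ * copied into the output program.
+ */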
+ for (i = 0; i < len; fp++, i++) {
+ struct ebpf_insn tmp_insns[6] = { };
+ struct ebpf_insn *insn = tmp_insns;
+
+ if (addrs)
+ addrs[i] = new_insn - new_prog;
+
+ switch (fp->code) {
+ /* Absolute loads are how classic BPF accesses skb */
+ case BPF_LD | BPF_ABS | BPF_W:
+ case BPF_LD | BPF_ABS | BPF_H:
+ case BPF_LD | BPF_ABS | BPF_B:
+ if (convert_bpf_load(fp, &insn))
+ goto err;
+
+ *insn = BPF_RAW_INSN(fp->code, 0, 0, 0, fp->k);
+ break;
+
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU | BPF_MOD | BPF_X:
+ /* For cBPF, a divide by zero must not trap; the program returns 0 instead */
+ *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
+ *insn++ = BPF_JMP_IMM(EBPF_JNE, BPF_REG_X, 0, 2);
+ *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
+ *insn++ = BPF_EXIT_INSN();
+ /* fallthrough */
+ case BPF_ALU | BPF_ADD | BPF_X:
+ case BPF_ALU | BPF_ADD | BPF_K:
+ case BPF_ALU | BPF_SUB | BPF_X:
+ case BPF_ALU | BPF_SUB | BPF_K:
+ case BPF_ALU | BPF_AND | BPF_X:
+ case BPF_ALU | BPF_AND | BPF_K:
+ case BPF_ALU | BPF_OR | BPF_X:
+ case BPF_ALU | BPF_OR | BPF_K:
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case BPF_ALU | BPF_RSH | BPF_X:
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case BPF_ALU | BPF_XOR | BPF_X:
+ case BPF_ALU | BPF_XOR | BPF_K:
+ case BPF_ALU | BPF_MUL | BPF_X:
+ case BPF_ALU | BPF_MUL | BPF_K:
+ case BPF_ALU | BPF_DIV | BPF_K:
+ case BPF_ALU | BPF_MOD | BPF_K:
+ case BPF_ALU | BPF_NEG:
+ case BPF_LD | BPF_IND | BPF_W:
+ case BPF_LD | BPF_IND | BPF_H:
+ case BPF_LD | BPF_IND | BPF_B:
+ /* All arithmetic insns map as-is. */
+ insn->code = fp->code;
+ insn->dst_reg = BPF_REG_A;
+ bpf_src = BPF_SRC(fp->code);
+ insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
+ insn->off = 0;
+ insn->imm = fp->k;
+ break;
+
+ /* Jump transformation cannot use BPF block macros
+ * everywhere as offset calculation and target updates
+ * require a bit more work than the rest, i.e. jump
+ * opcodes map as-is, but offsets need adjustment.
+ */
+
+#define BPF_EMIT_JMP \
+ do { \
+ if (target >= len) \
+ goto err; \
+ insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
+ /* Adjust pc relative offset for 2nd or 3rd insn. */ \
+ insn->off -= insn - tmp_insns; \
+ } while (0)
+
+ case BPF_JMP | BPF_JA:
+ target = i + fp->k + 1;
+ insn->code = fp->code;
+ BPF_EMIT_JMP;
+ break;
+
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
+ /* BPF immediates are signed, zero extend
+ * immediate into tmp register and use it
+ * in compare insn.
+ */
+ *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
+
+ insn->dst_reg = BPF_REG_A;
+ insn->src_reg = BPF_REG_TMP;
+ bpf_src = BPF_X;
+ } else {
+ insn->dst_reg = BPF_REG_A;
+ insn->imm = fp->k;
+ bpf_src = BPF_SRC(fp->code);
+ insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
+ }
+
+ /* Common case where 'jump_false' is next insn. */
+ if (fp->jf == 0) {
+ insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
+ target = i + fp->jt + 1;
+ BPF_EMIT_JMP;
+ break;
+ }
+
+ /* Convert JEQ into JNE when 'jump_true' is next insn. */
+ if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
+ insn->code = BPF_JMP | EBPF_JNE | bpf_src;
+ target = i + fp->jf + 1;
+ BPF_EMIT_JMP;
+ break;
+ }
+
+ /* Other jumps are mapped into two insns: Jxx and JA. */
+ target = i + fp->jt + 1;
+ insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
+ BPF_EMIT_JMP;
+ insn++;
+
+ insn->code = BPF_JMP | BPF_JA;
+ target = i + fp->jf + 1;
+ BPF_EMIT_JMP;
+ break;
+
+ /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
+ case BPF_LDX | BPF_MSH | BPF_B:
+ /* tmp = A */
+ *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
+ /* A = BPF_R0 = *(u8 *) (skb->data + K) */
+ *insn++ = BPF_LD_ABS(BPF_B, fp->k);
+ /* A &= 0xf */
+ *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
+ /* A <<= 2 */
+ *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
+ /* X = A */
+ *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
+ /* A = tmp */
+ *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
+ break;
+
+ /* RET_K is remapped into 2 insns. RET_A case doesn't need an
+ * extra mov as EBPF_REG_0 is already mapped into BPF_REG_A.
+ */
+ case BPF_RET | BPF_A:
+ case BPF_RET | BPF_K:
+ if (BPF_RVAL(fp->code) == BPF_K) {
+ *insn++ = BPF_MOV32_RAW(BPF_K, EBPF_REG_0,
+ 0, fp->k);
+ }
+ *insn = BPF_EXIT_INSN();
+ break;
+
+ /* Store to stack. */
+ case BPF_ST:
+ case BPF_STX:
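+ /* cBPF scratch slot M[k] lives on the eBPF stack, below the
+ * frame pointer.
+ */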
+ *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
+ BPF_ST ? BPF_REG_A : BPF_REG_X,
+ -(BPF_MEMWORDS - fp->k) * 4);
+ break;
+
+ /* Load from stack. */
+ case BPF_LD | BPF_MEM:
+ case BPF_LDX | BPF_MEM:
+ *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
+ BPF_REG_A : BPF_REG_X, BPF_REG_FP,
+ -(BPF_MEMWORDS - fp->k) * 4);
+ break;
+
+ /* A = K or X = K */
+ case BPF_LD | BPF_IMM:
+ case BPF_LDX | BPF_IMM:
+ *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
+ BPF_REG_A : BPF_REG_X, fp->k);
+ break;
+
+ /* X = A */
+ case BPF_MISC | BPF_TAX:
+ *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
+ break;
+
+ /* A = X */
+ case BPF_MISC | BPF_TXA:
+ *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
+ break;
+
+ /* A = mbuf->len or X = mbuf->len */
+ case BPF_LD | BPF_W | BPF_LEN:
+ case BPF_LDX | BPF_W | BPF_LEN:
+ /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
+
+ *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
+ BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
+ offsetof(struct rte_mbuf, pkt_len));
+ break;
+
+ /* Unknown instruction. */
+ default:
+ RTE_BPF_LOG(ERR, "%s: Unknown instruction!: %#x\n",
+ __func__, fp->code);
+ goto err;
+ }
+
+ insn++;
+ if (new_prog)
+ memcpy(new_insn, tmp_insns,
+ sizeof(*insn) * (insn - tmp_insns));
+ new_insn += insn - tmp_insns;
+ }
+
+ if (!new_prog) {
+ /* Only calculating new length. */
+ *new_len = new_insn - new_prog;
+ return 0;
+ }
+
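+ /*
+ * Jump offsets depend on the final instruction layout; if the emitted
+ * length changed on this pass, run another pass so that pc-relative
+ * offsets are recomputed against the final layout.
+ */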
+ pass++;
+ if ((ptrdiff_t)new_flen != new_insn - new_prog) {
+ new_flen = new_insn - new_prog;
+ if (pass > 2)
+ goto err;
+ goto do_pass;
+ }
+
+ free(addrs);
+ assert(*new_len == new_flen);
+
+ return 0;
+err:
+ free(addrs);
+ return -1;
+}
+
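+/*
+ * Convert a classic BPF program (e.g. produced by pcap_compile()) into
+ * the eBPF form expected by rte_bpf_load(). On error NULL is returned
+ * and rte_errno is set. Typical use, mirroring the test code:
+ *
+ *	prm = rte_bpf_convert(&fcode);
+ *	bpf = rte_bpf_load(prm);
+ *	...
+ *	rte_bpf_destroy(bpf);
+ *	rte_free(prm);
+ */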
+struct rte_bpf_prm *
+rte_bpf_convert(const struct bpf_program *prog)
+{
+ struct rte_bpf_prm *prm = NULL;
+ struct ebpf_insn *ebpf = NULL;
+ uint32_t ebpf_len = 0;
+ int ret;
+
+ if (prog == NULL) {
+ RTE_BPF_LOG(ERR, "%s: NULL program\n", __func__);
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* 1st pass: calculate the eBPF program length */
+ ret = bpf_convert_filter(prog->bf_insns, prog->bf_len, NULL, &ebpf_len);
+ if (ret < 0) {
+ RTE_BPF_LOG(ERR, "%s: cannot get eBPF length\n", __func__);
+ rte_errno = -ret;
+ return NULL;
+ }
+
+ RTE_BPF_LOG(DEBUG, "%s: prog len cBPF=%u -> eBPF=%u\n",
+ __func__, prog->bf_len, ebpf_len);
+
+ prm = rte_zmalloc("bpf_filter",
+ sizeof(*prm) + ebpf_len * sizeof(*ebpf), 0);
+ if (prm == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ /* The eBPF instructions in this case are right after the header */
+ ebpf = (void *)(prm + 1);
+
+ /* 2nd pass: remap cBPF to eBPF instructions */
+ ret = bpf_convert_filter(prog->bf_insns, prog->bf_len, ebpf, &ebpf_len);
+ if (ret < 0) {
+ RTE_BPF_LOG(ERR, "%s: cannot convert cBPF to eBPF\n", __func__);
+ rte_free(prm);
+ rte_errno = -ret;
+ return NULL;
+ }
+
+ prm->ins = ebpf;
+ prm->nb_ins = ebpf_len;
+
+ /* Classic BPF programs use mbufs */
+ prm->prog_arg.type = RTE_BPF_ARG_PTR_MBUF;
+ prm->prog_arg.size = sizeof(struct rte_mbuf);
+
+ return prm;
+}