--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+
+#include "bpf_impl.h"
+
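+/*
+ * Each helper macro below implements a single interpreter operation over
+ * the register file 'reg'; the 'type' parameter selects the operand width
+ * (and signedness, for arithmetic shifts and signed comparisons).
+ */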
+#define BPF_JMP_UNC(ins) ((ins) += (ins)->off)
+
+#define BPF_JMP_CND_REG(reg, ins, op, type) \
+ ((ins) += \
+ ((type)(reg)[(ins)->dst_reg] op (type)(reg)[(ins)->src_reg]) ? \
+ (ins)->off : 0)
+
+#define BPF_JMP_CND_IMM(reg, ins, op, type) \
+ ((ins) += \
+ ((type)(reg)[(ins)->dst_reg] op (type)(ins)->imm) ? \
+ (ins)->off : 0)
+
+#define BPF_NEG_ALU(reg, ins, type) \
+ ((reg)[(ins)->dst_reg] = (type)(-(reg)[(ins)->dst_reg]))
+
+#define EBPF_MOV_ALU_REG(reg, ins, type) \
+ ((reg)[(ins)->dst_reg] = (type)(reg)[(ins)->src_reg])
+
+#define BPF_OP_ALU_REG(reg, ins, op, type) \
+ ((reg)[(ins)->dst_reg] = \
+ (type)(reg)[(ins)->dst_reg] op (type)(reg)[(ins)->src_reg])
+
+#define EBPF_MOV_ALU_IMM(reg, ins, type) \
+ ((reg)[(ins)->dst_reg] = (type)(ins)->imm)
+
+#define BPF_OP_ALU_IMM(reg, ins, op, type) \
+ ((reg)[(ins)->dst_reg] = \
+ (type)(reg)[(ins)->dst_reg] op (type)(ins)->imm)
+
+#define BPF_DIV_ZERO_CHECK(bpf, reg, ins, type) do { \
+ if ((type)(reg)[(ins)->src_reg] == 0) { \
+ RTE_BPF_LOG(ERR, \
+ "%s(%p): division by 0 at pc: %#zx;\n", \
+ __func__, bpf, \
+ (uintptr_t)(ins) - (uintptr_t)(bpf)->prm.ins); \
+ return 0; \
+ } \
+} while (0)
+
+#define BPF_LD_REG(reg, ins, type) \
+ ((reg)[(ins)->dst_reg] = \
+ *(type *)(uintptr_t)((reg)[(ins)->src_reg] + (ins)->off))
+
+#define BPF_ST_IMM(reg, ins, type) \
+ (*(type *)(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off) = \
+ (type)(ins)->imm)
+
+#define BPF_ST_REG(reg, ins, type) \
+ (*(type *)(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off) = \
+ (type)(reg)[(ins)->src_reg])
+
+#define BPF_ST_XADD_REG(reg, ins, tp) \
+ (rte_atomic##tp##_add((rte_atomic##tp##_t *) \
+ (uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off), \
+		(reg)[(ins)->src_reg]))
+
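+/* byte-swap helpers implementing the (BPF_ALU | EBPF_END) instructions */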
+static inline void
+bpf_alu_be(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
+{
+ uint64_t *v;
+
+ v = reg + ins->dst_reg;
+ switch (ins->imm) {
+ case 16:
+ *v = rte_cpu_to_be_16(*v);
+ break;
+ case 32:
+ *v = rte_cpu_to_be_32(*v);
+ break;
+ case 64:
+ *v = rte_cpu_to_be_64(*v);
+ break;
+ }
+}
+
+static inline void
+bpf_alu_le(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
+{
+ uint64_t *v;
+
+ v = reg + ins->dst_reg;
+ switch (ins->imm) {
+ case 16:
+ *v = rte_cpu_to_le_16(*v);
+ break;
+ case 32:
+ *v = rte_cpu_to_le_32(*v);
+ break;
+ case 64:
+ *v = rte_cpu_to_le_64(*v);
+ break;
+ }
+}
+
+static inline uint64_t
+bpf_exec(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM])
+{
+ const struct ebpf_insn *ins;
+
+ for (ins = bpf->prm.ins; ; ins++) {
+ switch (ins->code) {
+ /* 32 bit ALU IMM operations */
+ case (BPF_ALU | BPF_ADD | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, +, uint32_t);
+ break;
+ case (BPF_ALU | BPF_SUB | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, -, uint32_t);
+ break;
+ case (BPF_ALU | BPF_AND | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, &, uint32_t);
+ break;
+ case (BPF_ALU | BPF_OR | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, |, uint32_t);
+ break;
+ case (BPF_ALU | BPF_LSH | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, <<, uint32_t);
+ break;
+ case (BPF_ALU | BPF_RSH | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, >>, uint32_t);
+ break;
+ case (BPF_ALU | BPF_XOR | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, ^, uint32_t);
+ break;
+ case (BPF_ALU | BPF_MUL | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, *, uint32_t);
+ break;
+ case (BPF_ALU | BPF_DIV | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, /, uint32_t);
+ break;
+ case (BPF_ALU | BPF_MOD | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, %, uint32_t);
+ break;
+ case (BPF_ALU | EBPF_MOV | BPF_K):
+ EBPF_MOV_ALU_IMM(reg, ins, uint32_t);
+ break;
+ /* 32 bit ALU REG operations */
+ case (BPF_ALU | BPF_ADD | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, +, uint32_t);
+ break;
+ case (BPF_ALU | BPF_SUB | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, -, uint32_t);
+ break;
+ case (BPF_ALU | BPF_AND | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, &, uint32_t);
+ break;
+ case (BPF_ALU | BPF_OR | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, |, uint32_t);
+ break;
+ case (BPF_ALU | BPF_LSH | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, <<, uint32_t);
+ break;
+ case (BPF_ALU | BPF_RSH | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, >>, uint32_t);
+ break;
+ case (BPF_ALU | BPF_XOR | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, ^, uint32_t);
+ break;
+ case (BPF_ALU | BPF_MUL | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, *, uint32_t);
+ break;
+ case (BPF_ALU | BPF_DIV | BPF_X):
+ BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint32_t);
+ BPF_OP_ALU_REG(reg, ins, /, uint32_t);
+ break;
+ case (BPF_ALU | BPF_MOD | BPF_X):
+ BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint32_t);
+ BPF_OP_ALU_REG(reg, ins, %, uint32_t);
+ break;
+ case (BPF_ALU | EBPF_MOV | BPF_X):
+ EBPF_MOV_ALU_REG(reg, ins, uint32_t);
+ break;
+ case (BPF_ALU | BPF_NEG):
+ BPF_NEG_ALU(reg, ins, uint32_t);
+ break;
+ case (BPF_ALU | EBPF_END | EBPF_TO_BE):
+ bpf_alu_be(reg, ins);
+ break;
+ case (BPF_ALU | EBPF_END | EBPF_TO_LE):
+ bpf_alu_le(reg, ins);
+ break;
+ /* 64 bit ALU IMM operations */
+ case (EBPF_ALU64 | BPF_ADD | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, +, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_SUB | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, -, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_AND | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, &, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_OR | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, |, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_LSH | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, <<, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_RSH | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, >>, uint64_t);
+ break;
+ case (EBPF_ALU64 | EBPF_ARSH | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, >>, int64_t);
+ break;
+ case (EBPF_ALU64 | BPF_XOR | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, ^, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_MUL | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, *, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_DIV | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, /, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_MOD | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, %, uint64_t);
+ break;
+ case (EBPF_ALU64 | EBPF_MOV | BPF_K):
+ EBPF_MOV_ALU_IMM(reg, ins, uint64_t);
+ break;
+ /* 64 bit ALU REG operations */
+ case (EBPF_ALU64 | BPF_ADD | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, +, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_SUB | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, -, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_AND | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, &, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_OR | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, |, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_LSH | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, <<, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_RSH | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, >>, uint64_t);
+ break;
+ case (EBPF_ALU64 | EBPF_ARSH | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, >>, int64_t);
+ break;
+ case (EBPF_ALU64 | BPF_XOR | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, ^, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_MUL | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, *, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_DIV | BPF_X):
+ BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint64_t);
+ BPF_OP_ALU_REG(reg, ins, /, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_MOD | BPF_X):
+ BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint64_t);
+ BPF_OP_ALU_REG(reg, ins, %, uint64_t);
+ break;
+ case (EBPF_ALU64 | EBPF_MOV | BPF_X):
+ EBPF_MOV_ALU_REG(reg, ins, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_NEG):
+ BPF_NEG_ALU(reg, ins, uint64_t);
+ break;
+ /* load instructions */
+ case (BPF_LDX | BPF_MEM | BPF_B):
+ BPF_LD_REG(reg, ins, uint8_t);
+ break;
+ case (BPF_LDX | BPF_MEM | BPF_H):
+ BPF_LD_REG(reg, ins, uint16_t);
+ break;
+ case (BPF_LDX | BPF_MEM | BPF_W):
+ BPF_LD_REG(reg, ins, uint32_t);
+ break;
+ case (BPF_LDX | BPF_MEM | EBPF_DW):
+ BPF_LD_REG(reg, ins, uint64_t);
+ break;
+ /* load 64 bit immediate value */
+ case (BPF_LD | BPF_IMM | EBPF_DW):
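+			/*
+			 * the immediate occupies two instruction slots:
+			 * ins[0].imm holds the low 32 bits,
+			 * ins[1].imm the high 32 bits.
+			 */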
+ reg[ins->dst_reg] = (uint32_t)ins[0].imm |
+ (uint64_t)(uint32_t)ins[1].imm << 32;
+ ins++;
+ break;
+ /* store instructions */
+ case (BPF_STX | BPF_MEM | BPF_B):
+ BPF_ST_REG(reg, ins, uint8_t);
+ break;
+ case (BPF_STX | BPF_MEM | BPF_H):
+ BPF_ST_REG(reg, ins, uint16_t);
+ break;
+ case (BPF_STX | BPF_MEM | BPF_W):
+ BPF_ST_REG(reg, ins, uint32_t);
+ break;
+ case (BPF_STX | BPF_MEM | EBPF_DW):
+ BPF_ST_REG(reg, ins, uint64_t);
+ break;
+ case (BPF_ST | BPF_MEM | BPF_B):
+ BPF_ST_IMM(reg, ins, uint8_t);
+ break;
+ case (BPF_ST | BPF_MEM | BPF_H):
+ BPF_ST_IMM(reg, ins, uint16_t);
+ break;
+ case (BPF_ST | BPF_MEM | BPF_W):
+ BPF_ST_IMM(reg, ins, uint32_t);
+ break;
+ case (BPF_ST | BPF_MEM | EBPF_DW):
+ BPF_ST_IMM(reg, ins, uint64_t);
+ break;
+ /* atomic add instructions */
+ case (BPF_STX | EBPF_XADD | BPF_W):
+ BPF_ST_XADD_REG(reg, ins, 32);
+ break;
+ case (BPF_STX | EBPF_XADD | EBPF_DW):
+ BPF_ST_XADD_REG(reg, ins, 64);
+ break;
+ /* jump instructions */
+ case (BPF_JMP | BPF_JA):
+ BPF_JMP_UNC(ins);
+ break;
+ /* jump IMM instructions */
+ case (BPF_JMP | BPF_JEQ | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, ==, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JNE | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, !=, uint64_t);
+ break;
+ case (BPF_JMP | BPF_JGT | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, >, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JLT | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, <, uint64_t);
+ break;
+ case (BPF_JMP | BPF_JGE | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, >=, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JLE | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, <=, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JSGT | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, >, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSLT | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, <, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSGE | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, >=, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSLE | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, <=, int64_t);
+ break;
+ case (BPF_JMP | BPF_JSET | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, &, uint64_t);
+ break;
+ /* jump REG instructions */
+ case (BPF_JMP | BPF_JEQ | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, ==, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JNE | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, !=, uint64_t);
+ break;
+ case (BPF_JMP | BPF_JGT | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, >, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JLT | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, <, uint64_t);
+ break;
+ case (BPF_JMP | BPF_JGE | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, >=, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JLE | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, <=, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JSGT | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, >, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSLT | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, <, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSGE | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, >=, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSLE | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, <=, int64_t);
+ break;
+ case (BPF_JMP | BPF_JSET | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, &, uint64_t);
+ break;
+ /* call instructions */
+ case (BPF_JMP | EBPF_CALL):
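+			/*
+			 * ins->imm selects the external function in the
+			 * prm.xsym[] table (checked at load time).
+			 */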
+ reg[EBPF_REG_0] = bpf->prm.xsym[ins->imm].func(
+ reg[EBPF_REG_1], reg[EBPF_REG_2],
+ reg[EBPF_REG_3], reg[EBPF_REG_4],
+ reg[EBPF_REG_5]);
+ break;
+ /* return instruction */
+ case (BPF_JMP | EBPF_EXIT):
+ return reg[EBPF_REG_0];
+ default:
+ RTE_BPF_LOG(ERR,
+ "%s(%p): invalid opcode %#x at pc: %#zx;\n",
+ __func__, bpf, ins->code,
+ (uintptr_t)ins - (uintptr_t)bpf->prm.ins);
+ return 0;
+ }
+ }
+
+ /* should never be reached */
+ RTE_VERIFY(0);
+ return 0;
+}
+
+__rte_experimental uint32_t
+rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[], uint64_t rc[],
+ uint32_t num)
+{
+ uint32_t i;
+ uint64_t reg[EBPF_REG_NUM];
+ uint64_t stack[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
+
+ for (i = 0; i != num; i++) {
+
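+		/*
+		 * per eBPF calling convention: R1 holds the input context,
+		 * R10 is the frame pointer at the top of the stack
+		 * (the stack grows down).
+		 */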
+ reg[EBPF_REG_1] = (uintptr_t)ctx[i];
+ reg[EBPF_REG_10] = (uintptr_t)(stack + RTE_DIM(stack));
+
+ rc[i] = bpf_exec(bpf, reg);
+ }
+
+ return i;
+}
+
+__rte_experimental uint64_t
+rte_bpf_exec(const struct rte_bpf *bpf, void *ctx)
+{
+ uint64_t rc;
+
+ rte_bpf_exec_burst(bpf, &ctx, &rc, 1);
+ return rc;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_BPF_H_
+#define _RTE_BPF_H_
+
+/**
+ * @file
+ *
+ * RTE BPF support.
+ * librte_bpf provides a framework to load and execute eBPF bytecode
+ * inside user-space DPDK-based applications.
+ * It supports a basic set of features from the eBPF spec
+ * (https://www.kernel.org/doc/Documentation/networking/filter.txt).
+ */
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <bpf_def.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Possible types for function/BPF program arguments.
+ */
+enum rte_bpf_arg_type {
+ RTE_BPF_ARG_UNDEF, /**< undefined */
+ RTE_BPF_ARG_RAW, /**< scalar value */
+ RTE_BPF_ARG_PTR = 0x10, /**< pointer to data buffer */
+ RTE_BPF_ARG_PTR_MBUF, /**< pointer to rte_mbuf */
+	RTE_BPF_ARG_PTR_STACK, /**< pointer to the stack area (internal use) */
+};
+
+/**
+ * function argument information
+ */
+struct rte_bpf_arg {
+ enum rte_bpf_arg_type type;
+ size_t size; /**< for pointer types, size of data it points to */
+ size_t buf_size;
+ /**< for mbuf ptr type, max size of rte_mbuf data buffer */
+};
+
+/**
+ * determine whether the given argument type is a pointer
+ */
+#define RTE_BPF_ARG_PTR_TYPE(x) ((x) & RTE_BPF_ARG_PTR)
+
+/**
+ * Possible types for external symbols.
+ */
+enum rte_bpf_xtype {
+ RTE_BPF_XTYPE_FUNC, /**< function */
+ RTE_BPF_XTYPE_VAR, /**< variable */
+ RTE_BPF_XTYPE_NUM
+};
+
+/**
+ * Definition for external symbols available in the BPF program.
+ */
+struct rte_bpf_xsym {
+ const char *name; /**< name */
+ enum rte_bpf_xtype type; /**< type */
+ union {
+ uint64_t (*func)(uint64_t, uint64_t, uint64_t,
+ uint64_t, uint64_t);
+ void *var;
+ }; /**< value */
+};
+
+/**
+ * Input parameters for loading eBPF code.
+ */
+struct rte_bpf_prm {
+ const struct ebpf_insn *ins; /**< array of eBPF instructions */
+ uint32_t nb_ins; /**< number of instructions in ins */
+ const struct rte_bpf_xsym *xsym;
+ /**< array of external symbols that eBPF code is allowed to reference */
+ uint32_t nb_xsym; /**< number of elements in xsym */
+ struct rte_bpf_arg prog_arg; /**< eBPF program input arg description */
+};
+
+/**
+ * Information about compiled into native ISA eBPF code.
+ */
+struct rte_bpf_jit {
+ uint64_t (*func)(void *); /**< JIT-ed native code */
+ size_t sz; /**< size of JIT-ed code */
+};
+
+struct rte_bpf;
+
+/**
+ * De-allocate all memory used by this eBPF execution context.
+ *
+ * @param bpf
+ * BPF handle to destroy.
+ */
+void rte_bpf_destroy(struct rte_bpf *bpf);
+
+/**
+ * Create a new eBPF execution context and load given BPF code into it.
+ *
+ * @param prm
+ *   Parameters used to create and initialise the BPF execution context.
+ * @return
+ * BPF handle that is used in future BPF operations,
+ * or NULL on error, with error code set in rte_errno.
+ * Possible rte_errno errors include:
+ * - EINVAL - invalid parameter passed to function
+ * - ENOMEM - can't reserve enough memory
+ */
+struct rte_bpf *rte_bpf_load(const struct rte_bpf_prm *prm);
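+
+/*
+ * A minimal usage sketch for rte_bpf_load() (illustrative only; the
+ * instruction array below is a hypothetical program that just returns 1):
+ *
+ *	static const struct ebpf_insn ins[] = {
+ *		{.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ *			.dst_reg = EBPF_REG_0, .imm = 1},
+ *		{.code = (BPF_JMP | EBPF_EXIT)},
+ *	};
+ *	const struct rte_bpf_prm prm = {
+ *		.ins = ins,
+ *		.nb_ins = RTE_DIM(ins),
+ *		.prog_arg = {.type = RTE_BPF_ARG_RAW},
+ *	};
+ *	struct rte_bpf *bpf = rte_bpf_load(&prm);
+ *	if (bpf == NULL)
+ *		printf("load failed, rte_errno=%d\n", rte_errno);
+ */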
+
+/**
+ * Execute given BPF bytecode.
+ *
+ * @param bpf
+ * handle for the BPF code to execute.
+ * @param ctx
+ * pointer to input context.
+ * @return
+ * BPF execution return value.
+ */
+uint64_t rte_bpf_exec(const struct rte_bpf *bpf, void *ctx);
+
+/**
+ * Execute given BPF bytecode over a set of input contexts.
+ *
+ * @param bpf
+ * handle for the BPF code to execute.
+ * @param ctx
+ * array of pointers to the input contexts.
+ * @param rc
+ * array of return values (one per input).
+ * @param num
+ * number of elements in ctx[] (and rc[]).
+ * @return
+ * number of successfully processed inputs.
+ */
+uint32_t rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[],
+ uint64_t rc[], uint32_t num);
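+
+/*
+ * Burst usage sketch (BURST and the contents of ctx[] are hypothetical;
+ * each ctx[i] must match the prog_arg description given at load time):
+ *
+ *	void *ctx[BURST];
+ *	uint64_t rc[BURST];
+ *
+ *	... fill ctx[] ...
+ *	rte_bpf_exec_burst(bpf, ctx, rc, BURST);
+ */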
+
+/**
+ * Provide information about the natively compiled code for the given
+ * BPF handle.
+ *
+ * @param bpf
+ * handle for the BPF code.
+ * @param jit
+ * pointer to the rte_bpf_jit structure to be filled with related data.
+ * @return
+ * - -EINVAL if the parameters are invalid.
+ * - Zero if operation completed successfully.
+ */
+int rte_bpf_get_jit(const struct rte_bpf *bpf, struct rte_bpf_jit *jit);
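+
+/*
+ * Sketch of preferring JIT-ed code when it is available, falling back
+ * to the interpreter otherwise:
+ *
+ *	struct rte_bpf_jit jit;
+ *	uint64_t rc;
+ *
+ *	if (rte_bpf_get_jit(bpf, &jit) == 0 && jit.func != NULL)
+ *		rc = jit.func(ctx);
+ *	else
+ *		rc = rte_bpf_exec(bpf, ctx);
+ */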
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BPF_H_ */