#include <bpf/xsk.h>
#include <rte_ethdev.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>
int max_queue_cnt;
int combined_queue_cnt;
bool shared_umem;
+ char prog_path[PATH_MAX]; /* custom XDP program path (xdp_prog devarg) */
+ bool custom_prog_configured; /* true once that program has been loaded */
struct rte_ether_addr eth_addr;
#define ETH_AF_XDP_START_QUEUE_ARG "start_queue"
#define ETH_AF_XDP_QUEUE_COUNT_ARG "queue_count"
#define ETH_AF_XDP_SHARED_UMEM_ARG "shared_umem"
+#define ETH_AF_XDP_PROG_ARG "xdp_prog"
static const char * const valid_arguments[] = {
ETH_AF_XDP_IFACE_ARG,
ETH_AF_XDP_START_QUEUE_ARG,
ETH_AF_XDP_QUEUE_COUNT_ARG,
ETH_AF_XDP_SHARED_UMEM_ARG,
+ ETH_AF_XDP_PROG_ARG,
NULL
};
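+/*
+ * Example (sketch, with placeholder interface name and object path): the
+ * new devarg selects a custom XDP program object at vdev creation time,
+ * alongside the existing arguments, e.g.
+ *
+ *   --vdev net_af_xdp0,iface=ens786f1,xdp_prog=/path/to/prog.o
+ */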
struct xsk_umem_info *umem = rxq->umem;
uint32_t idx_rx = 0;
unsigned long rx_bytes = 0;
- int rcvd, i;
+ int i;
struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
- /* allocate bufs for fill queue replenishment after rx */
- if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
- AF_XDP_LOG(DEBUG,
- "Failed to get enough buffers for fq.\n");
- return 0;
- }
+ nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
- rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
-
- if (rcvd == 0) {
+ if (nb_pkts == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
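+ /* A set need_wakeup flag on the fill ring means the kernel is
+ * waiting for a wakeup before it resumes processing it; poll() on
+ * the xsk fd delivers that wakeup.
+ */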
if (xsk_ring_prod__needs_wakeup(fq))
(void)poll(rxq->fds, 1, 1000);
#endif
- goto out;
+ return 0;
}
- for (i = 0; i < rcvd; i++) {
+ /* allocate bufs for fill queue replenishment after rx */
+ if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
+ AF_XDP_LOG(DEBUG,
+ "Failed to get enough buffers for fq.\n");
+ /* Roll back cached_cons, which was advanced by
+ * xsk_ring_cons__peek(), so the peeked descriptors can be
+ * re-read on the next call.
+ */
+ rx->cached_cons -= nb_pkts;
+ return 0;
+ }
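+ /* The adjustment above mirrors what later libbpf versions expose as
+ * xsk_ring_cons__cancel(); peek() itself offers no way to return
+ * descriptors to the ring.
+ */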
+
+ for (i = 0; i < nb_pkts; i++) {
const struct xdp_desc *desc;
uint64_t addr;
uint32_t len;
rx_bytes += len;
}
- xsk_ring_cons__release(rx, rcvd);
-
- (void)reserve_fill_queue(umem, rcvd, fq_bufs, fq);
+ xsk_ring_cons__release(rx, nb_pkts);
+ (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
/* statistics */
- rxq->stats.rx_pkts += rcvd;
+ rxq->stats.rx_pkts += nb_pkts;
rxq->stats.rx_bytes += rx_bytes;
-out:
- if (rcvd != nb_pkts)
- rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
- nb_pkts - rcvd);
-
- return rcvd;
+ return nb_pkts;
}
#else
static uint16_t
struct xsk_ring_prod *fq = &rxq->fq;
uint32_t idx_rx = 0;
unsigned long rx_bytes = 0;
- int rcvd, i;
+ int i;
uint32_t free_thresh = fq->size >> 1;
struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE,
NULL, fq);
- if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
- return 0;
-
- rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
- if (rcvd == 0) {
+ nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
+ if (nb_pkts == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
if (xsk_ring_prod__needs_wakeup(fq))
(void)poll(rxq->fds, 1, 1000);
#endif
+ return 0;
+ }
- goto out;
+ if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
+ /* Roll back cached_cons, which was advanced by
+ * xsk_ring_cons__peek(), so the peeked descriptors can be
+ * re-read on the next call.
+ */
+ rx->cached_cons -= nb_pkts;
+ return 0;
}
- for (i = 0; i < rcvd; i++) {
+ for (i = 0; i < nb_pkts; i++) {
const struct xdp_desc *desc;
uint64_t addr;
uint32_t len;
bufs[i] = mbufs[i];
}
- xsk_ring_cons__release(rx, rcvd);
+ xsk_ring_cons__release(rx, nb_pkts);
/* statistics */
- rxq->stats.rx_pkts += rcvd;
+ rxq->stats.rx_pkts += nb_pkts;
rxq->stats.rx_bytes += rx_bytes;
-out:
- if (rcvd != nb_pkts)
- rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
- nb_pkts - rcvd);
-
- return rcvd;
+ return nb_pkts;
}
#endif
}
-/* This function gets called when the current port gets stopped. */
+/* This function gets called when the port is stopped. */
-static void
+static int
eth_dev_stop(struct rte_eth_dev *dev)
{
dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ return 0;
}
/* Find ethdev in list */
return list;
}
+/* Check if the netdev,qid context already exists. Sharing a umem between
+ * two sockets on the same netdev and queue id is not supported, so a match
+ * means the umem cannot be shared with this rxq.
+ */
+static inline bool
+ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
+ struct pkt_rx_queue *list_rxq, const char *list_ifname)
+{
+ bool exists = false;
+
+ if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
+ !strncmp(ifname, list_ifname, IFNAMSIZ)) {
+ AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
+ ifname, rxq->xsk_queue_idx);
+ exists = true;
+ }
+
+ return exists;
+}
+
-/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
+/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool.
+ * Returns 0, storing the umem in *umem if a shareable one is found, or
+ * -1 if a conflicting (netdev, qid) context already exists.
+ */
-static inline struct xsk_umem_info *
-get_shared_umem(struct pkt_rx_queue *rxq) {
+static inline int
+get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
+ struct xsk_umem_info **umem)
+{
struct internal_list *list;
struct pmd_internals *internals;
- int i = 0;
+ int i = 0, ret = 0;
struct rte_mempool *mb_pool = rxq->mb_pool;
if (mb_pool == NULL)
- return NULL;
+ return ret;
pthread_mutex_lock(&internal_list_lock);
if (rxq == list_rxq)
continue;
if (mb_pool == internals->rx_queues[i].mb_pool) {
+ if (ctx_exists(rxq, ifname, list_rxq,
+ internals->if_name)) {
+ ret = -1;
+ goto out;
+ }
if (__atomic_load_n(
&internals->rx_queues[i].umem->refcnt,
__ATOMIC_ACQUIRE)) {
- pthread_mutex_unlock(
- &internal_list_lock);
- return internals->rx_queues[i].umem;
+ *umem = internals->rx_queues[i].umem;
+ goto out;
}
}
}
}
+out:
pthread_mutex_unlock(&internal_list_lock);
- return NULL;
+ return ret;
}
static int
#endif
rte_free(umem);
- umem = NULL;
}
static int
struct pkt_rx_queue *rxq;
int i;
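+ /* Only the primary process releases the port's resources; secondary
+ * processes simply return.
+ */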
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
rte_socket_id());
}
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
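+/* Align the base address of the umem down to a page boundary; *align
+ * receives the offset of the mempool's first memory chunk from that
+ * boundary.
+ */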
-static inline uint64_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
+static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
{
struct rte_mempool_memhdr *memhdr;
- uint64_t memhdr_addr, aligned_addr;
+ uintptr_t memhdr_addr, aligned_addr;
memhdr = STAILQ_FIRST(&mp->mem_list);
- memhdr_addr = (uint64_t)memhdr->addr;
+ memhdr_addr = (uintptr_t)memhdr->addr;
aligned_addr = memhdr_addr & ~(getpagesize() - 1);
*align = memhdr_addr - aligned_addr;
uint64_t umem_size, align = 0;
if (internals->shared_umem) {
- umem = get_shared_umem(rxq);
+ if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
+ return NULL;
+
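+ /* Reuse an existing umem only while fewer than max_xsks sockets
+ * hold a reference to it.
+ */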
if (umem != NULL &&
__atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
umem->max_xsks) {
umem->mb_pool = mb_pool;
base_addr = (void *)get_base_addr(mb_pool, &align);
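+ /* Do the size calculation in 64 bits: populated_size * frame_size
+ * can overflow a 32-bit result for large mempools.
+ */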
- umem_size = mb_pool->populated_size * usr_config.frame_size +
+ umem_size = (uint64_t)mb_pool->populated_size *
+ (uint64_t)usr_config.frame_size +
align;
ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
return NULL;
}
+static int
+load_custom_xdp_prog(const char *prog_path, int if_index)
+{
+ int ret, prog_fd = -1;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+
+ ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (ret) {
+ AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
+ return ret;
+ }
+
+ /*
+ * The loaded program must provide a map of xsks named "xsks_map" so
+ * that traffic can be redirected to user space. When the xsk is
+ * created, libbpf inserts it into the map.
+ */
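+ /*
+ * A minimal compatible program might look like the following sketch
+ * (headers omitted; the map must be named "xsks_map" for the lookup
+ * below, while the map size, section names and function name are
+ * illustrative only):
+ *
+ *	struct {
+ *		__uint(type, BPF_MAP_TYPE_XSKMAP);
+ *		__uint(max_entries, 64);
+ *		__uint(key_size, sizeof(int));
+ *		__uint(value_size, sizeof(int));
+ *	} xsks_map SEC(".maps");
+ *
+ *	SEC("xdp")
+ *	int xdp_sock_prog(struct xdp_md *ctx)
+ *	{
+ *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index,
+ *					XDP_PASS);
+ *	}
+ */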
+ map = bpf_object__find_map_by_name(obj, "xsks_map");
+ if (!map) {
+ AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
+ return -1;
+ }
+
+ /* Link the program with the given network device. With
+ * XDP_FLAGS_UPDATE_IF_NOEXIST, the call fails if an XDP program is
+ * already attached to the interface.
+ */
+ ret = bpf_set_link_xdp_fd(if_index, prog_fd,
+ XDP_FLAGS_UPDATE_IF_NOEXIST);
+ if (ret) {
+ AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
+ prog_fd);
+ return -1;
+ }
+
+ AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
+ prog_path, prog_fd);
+
+ return 0;
+}
+
static int
xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
int ring_size)
cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
#endif
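+ /* If the xdp_prog devarg supplied a path, load that program once per
+ * device; every queue on the device then shares it.
+ */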
+ if (strnlen(internals->prog_path, PATH_MAX) &&
+ !internals->custom_prog_configured) {
+ ret = load_custom_xdp_prog(internals->prog_path,
+ internals->if_index);
+ if (ret) {
+ AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
+ internals->prog_path);
+ goto err;
+ }
+ internals->custom_prog_configured = 1;
+ }
+
if (internals->shared_umem)
ret = create_shared_socket(&rxq->xsk, internals->if_name,
rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
return 0;
}
+/** parse xdp prog argument */
+static int
+parse_prog_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ char *path = extra_args;
+
+ if (strnlen(value, PATH_MAX) == PATH_MAX) {
+ AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
+ value, PATH_MAX);
+ return -EINVAL;
+ }
+
+ if (access(value, F_OK) != 0) {
+ AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
+ value, strerror(errno));
+ return -EINVAL;
+ }
+
+ strlcpy(path, value, PATH_MAX);
+
+ return 0;
+}
+
static int
xdp_get_channels_info(const char *if_name, int *max_queues,
int *combined_queues)
channels.cmd = ETHTOOL_GCHANNELS;
ifr.ifr_data = (void *)&channels;
- strncpy(ifr.ifr_name, if_name, IFNAMSIZ);
+ strlcpy(ifr.ifr_name, if_name, IFNAMSIZ); /* always NUL-terminated */
ret = ioctl(fd, SIOCETHTOOL, &ifr);
if (ret) {
if (errno == EOPNOTSUPP) {
static int
parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
- int *queue_cnt, int *shared_umem)
+ int *queue_cnt, int *shared_umem, char *prog_path)
{
int ret;
if (ret < 0)
goto free_kvlist;
+ ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG,
+ &parse_prog_arg, prog_path);
+ if (ret < 0)
+ goto free_kvlist;
+
free_kvlist:
rte_kvargs_free(kvlist);
return ret;
static struct rte_eth_dev *
init_internals(struct rte_vdev_device *dev, const char *if_name,
- int start_queue_idx, int queue_cnt, int shared_umem)
+ int start_queue_idx, int queue_cnt, int shared_umem,
+ const char *prog_path)
{
const char *name = rte_vdev_device_name(dev);
const unsigned int numa_node = dev->device.numa_node;
internals->start_queue_idx = start_queue_idx;
internals->queue_cnt = queue_cnt;
strlcpy(internals->if_name, if_name, IFNAMSIZ);
+ strlcpy(internals->prog_path, prog_path, PATH_MAX);
+ internals->custom_prog_configured = 0;
#ifndef ETH_AF_XDP_SHARED_UMEM
if (shared_umem) {
eth_dev->data->dev_private = internals;
eth_dev->data->dev_link = pmd_link;
eth_dev->data->mac_addrs = &internals->eth_addr;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
eth_dev->dev_ops = &ops;
eth_dev->rx_pkt_burst = eth_af_xdp_rx;
eth_dev->tx_pkt_burst = eth_af_xdp_tx;
- /* Let rte_eth_dev_close() release the port resources. */
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
int shared_umem = 0;
+ char prog_path[PATH_MAX] = {'\0'};
struct rte_eth_dev *eth_dev = NULL;
const char *name;
dev->device.numa_node = rte_socket_id();
if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
- &xsk_queue_cnt, &shared_umem) < 0) {
+ &xsk_queue_cnt, &shared_umem, prog_path) < 0) {
AF_XDP_LOG(ERR, "Invalid kvargs value\n");
return -EINVAL;
}
}
eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
- xsk_queue_cnt, shared_umem);
+ xsk_queue_cnt, shared_umem, prog_path);
if (eth_dev == NULL) {
AF_XDP_LOG(ERR, "Failed to init internals\n");
return -1;
"iface=<string> "
"start_queue=<int> "
"queue_count=<int> "
- "shared_umem=<int> ");
+ "shared_umem=<int> "
+ "xdp_prog=<string> ");