/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation.
+ * Copyright(c) 2019-2020 Intel Corporation.
*/
#include <unistd.h>
#include <errno.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ring.h>
+#include <rte_spinlock.h>
+
+#include "compat.h"
+
#ifndef SOL_XDP
#define SOL_XDP 283
struct xsk_umem_info {
- struct xsk_ring_prod fq;
- struct xsk_ring_cons cq;
struct xsk_umem *umem;
struct rte_ring *buf_ring;
const struct rte_memzone *mz;
struct rte_mempool *mb_pool;
void *buffer;
+ uint8_t refcnt;	/* number of xsk sockets currently sharing this UMEM */
+ uint32_t max_xsks;	/* maximum number of xsk sockets this UMEM can support */
};
struct rx_stats {
struct rx_stats stats;
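+ /* The fill and completion rings are kept per rx queue rather than per
+ * UMEM, since a shared UMEM can back several xsk sockets. */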
+ struct xsk_ring_prod fq;
+ struct xsk_ring_cons cq;
+
struct pkt_tx_queue *pair;
struct pollfd fds[1];
int xsk_queue_idx;
int queue_cnt;
int max_queue_cnt;
int combined_queue_cnt;
+ bool shared_umem;		/* UMEM sharing requested via devargs */
+ char prog_path[PATH_MAX];	/* path to a custom XDP program, if any */
+ bool custom_prog_configured;	/* custom XDP program already loaded */
struct rte_ether_addr eth_addr;
#define ETH_AF_XDP_IFACE_ARG "iface"
#define ETH_AF_XDP_START_QUEUE_ARG "start_queue"
#define ETH_AF_XDP_QUEUE_COUNT_ARG "queue_count"
+#define ETH_AF_XDP_SHARED_UMEM_ARG "shared_umem"
+#define ETH_AF_XDP_PROG_ARG "xdp_prog"
static const char * const valid_arguments[] = {
ETH_AF_XDP_IFACE_ARG,
ETH_AF_XDP_START_QUEUE_ARG,
ETH_AF_XDP_QUEUE_COUNT_ARG,
+ ETH_AF_XDP_SHARED_UMEM_ARG,
+ ETH_AF_XDP_PROG_ARG,
NULL
};
.link_autoneg = ETH_LINK_AUTONEG
};
+/* List which tracks PMDs to facilitate sharing UMEMs across them. */
+struct internal_list {
+ TAILQ_ENTRY(internal_list) next;
+ struct rte_eth_dev *eth_dev;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+static struct internal_list_head internal_list =
+ TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline int
reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
- struct rte_mbuf **bufs)
+ struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
{
- struct xsk_ring_prod *fq = &umem->fq;
uint32_t idx;
uint16_t i;
#else
static inline int
reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
- struct rte_mbuf **bufs __rte_unused)
+ struct rte_mbuf **bufs __rte_unused,
+ struct xsk_ring_prod *fq)
{
- struct xsk_ring_prod *fq = &umem->fq;
void *addrs[reserve_size];
uint32_t idx;
uint16_t i;
static inline int
reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
- struct rte_mbuf **bufs)
+ struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
- return reserve_fill_queue_zc(umem, reserve_size, bufs);
+ return reserve_fill_queue_zc(umem, reserve_size, bufs, fq);
#else
- return reserve_fill_queue_cp(umem, reserve_size, bufs);
+ return reserve_fill_queue_cp(umem, reserve_size, bufs, fq);
#endif
}
{
struct pkt_rx_queue *rxq = queue;
struct xsk_ring_cons *rx = &rxq->rx;
+ struct xsk_ring_prod *fq = &rxq->fq;
struct xsk_umem_info *umem = rxq->umem;
uint32_t idx_rx = 0;
unsigned long rx_bytes = 0;
if (rcvd == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
- if (xsk_ring_prod__needs_wakeup(&umem->fq))
+ if (xsk_ring_prod__needs_wakeup(fq))
(void)poll(rxq->fds, 1, 1000);
#endif
xsk_ring_cons__release(rx, rcvd);
- (void)reserve_fill_queue(umem, rcvd, fq_bufs);
+ (void)reserve_fill_queue(umem, rcvd, fq_bufs, fq);
/* statistics */
rxq->stats.rx_pkts += rcvd;
struct pkt_rx_queue *rxq = queue;
struct xsk_ring_cons *rx = &rxq->rx;
struct xsk_umem_info *umem = rxq->umem;
- struct xsk_ring_prod *fq = &umem->fq;
+ struct xsk_ring_prod *fq = &rxq->fq;
uint32_t idx_rx = 0;
unsigned long rx_bytes = 0;
int rcvd, i;
uint32_t free_thresh = fq->size >> 1;
struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
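+ /* Top up the fill queue whenever at least half of it is free. */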
+ if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
+ (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE,
+ NULL, fq);
+
if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
return 0;
goto out;
}
- if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
- (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);
-
for (i = 0; i < rcvd; i++) {
const struct xdp_desc *desc;
uint64_t addr;
}
static void
-pull_umem_cq(struct xsk_umem_info *umem, int size)
+pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
{
- struct xsk_ring_cons *cq = &umem->cq;
size_t i, n;
uint32_t idx_cq = 0;
}
static void
-kick_tx(struct pkt_tx_queue *txq)
+kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
{
struct xsk_umem_info *umem = txq->umem;
- pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS);
+ pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
#if defined(XDP_USE_NEED_WAKEUP)
if (xsk_ring_prod__needs_wakeup(&txq->tx))
/* pull from completion queue to leave more space */
if (errno == EAGAIN)
pull_umem_cq(umem,
- XSK_RING_CONS__DEFAULT_NUM_DESCS);
+ XSK_RING_CONS__DEFAULT_NUM_DESCS,
+ cq);
}
}
uint16_t count = 0;
struct xdp_desc *desc;
uint64_t addr, offset;
- uint32_t free_thresh = umem->cq.size >> 1;
+ struct xsk_ring_cons *cq = &txq->pair->cq;
+ uint32_t free_thresh = cq->size >> 1;
- if (xsk_cons_nb_avail(&umem->cq, free_thresh) >= free_thresh)
- pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS);
+ if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
+ pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
if (mbuf->pool == umem->mb_pool) {
if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
- kick_tx(txq);
+ kick_tx(txq, cq);
if (!xsk_ring_prod__reserve(&txq->tx, 1,
&idx_tx))
goto out;
if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
rte_pktmbuf_free(local_mbuf);
- kick_tx(txq);
+ kick_tx(txq, cq);
goto out;
}
tx_bytes += mbuf->pkt_len;
}
- kick_tx(txq);
+ kick_tx(txq, cq);
out:
xsk_ring_prod__submit(&txq->tx, count);
unsigned long tx_bytes = 0;
int i;
uint32_t idx_tx;
+ struct xsk_ring_cons *cq = &txq->pair->cq;
nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
- pull_umem_cq(umem, nb_pkts);
+ pull_umem_cq(umem, nb_pkts, cq);
nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
nb_pkts, NULL);
return 0;
if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
- kick_tx(txq);
+ kick_tx(txq, cq);
rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
return 0;
}
xsk_ring_prod__submit(&txq->tx, nb_pkts);
- kick_tx(txq);
+ kick_tx(txq, cq);
txq->stats.tx_pkts += nb_pkts;
txq->stats.tx_bytes += tx_bytes;
dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
+/* Find ethdev in list */
+static inline struct internal_list *
+find_internal_resource(struct pmd_internals *port_int)
+{
+ int found = 0;
+ struct internal_list *list = NULL;
+
+ if (port_int == NULL)
+ return NULL;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ struct pmd_internals *list_int =
+ list->eth_dev->data->dev_private;
+ if (list_int == port_int) {
+ found = 1;
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ if (!found)
+ return NULL;
+
+ return list;
+}
+
+/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
+static inline struct xsk_umem_info *
+get_shared_umem(struct pkt_rx_queue *rxq)
+{
+ struct internal_list *list;
+ struct pmd_internals *internals;
+ int i = 0;
+ struct rte_mempool *mb_pool = rxq->mb_pool;
+
+ if (mb_pool == NULL)
+ return NULL;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ internals = list->eth_dev->data->dev_private;
+ for (i = 0; i < internals->queue_cnt; i++) {
+ struct pkt_rx_queue *list_rxq =
+ &internals->rx_queues[i];
+ if (rxq == list_rxq)
+ continue;
+ if (mb_pool == internals->rx_queues[i].mb_pool) {
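+ /* Reuse the UMEM only while another socket still holds
+ * a reference to it. */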
+ if (__atomic_load_n(
+ &internals->rx_queues[i].umem->refcnt,
+ __ATOMIC_ACQUIRE)) {
+ pthread_mutex_unlock(
+ &internal_list_lock);
+ return internals->rx_queues[i].umem;
+ }
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ return NULL;
+}
+
static int
eth_dev_configure(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internal = dev->data->dev_private;
+
/* rx/tx must be paired */
if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
return -EINVAL;
+ if (internal->shared_umem) {
+ struct internal_list *list = NULL;
+ const char *name = dev->device->name;
+
+ /* Ensure PMD is not already inserted into the list */
+ list = find_internal_resource(internal);
+ if (list)
+ return 0;
+
+ list = rte_zmalloc_socket(name, sizeof(*list), 0,
+ dev->device->numa_node);
+ if (list == NULL)
+ return -1;
+
+ list->eth_dev = dev;
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_INSERT_TAIL(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+ }
+
return 0;
}
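+
+/*
+ * Shared UMEM usage sketch (hypothetical names): two vdevs whose rx queues
+ * are set up with the same mempool end up sharing a single UMEM:
+ *   --vdev net_af_xdp0,iface=eth0,shared_umem=1
+ *   --vdev net_af_xdp1,iface=eth1,shared_umem=1
+ */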
umem = NULL;
}
-static void
+static int
eth_dev_close(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = dev->data->dev_private;
struct pkt_rx_queue *rxq;
int i;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
rte_socket_id());
if (rxq->umem == NULL)
break;
xsk_socket__delete(rxq->xsk);
- (void)xsk_umem__delete(rxq->umem->umem);
- xdp_umem_destroy(rxq->umem);
+
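+ /* Only destroy the UMEM when the last socket sharing it is closed. */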
+ if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE)
+ == 0) {
+ (void)xsk_umem__delete(rxq->umem->umem);
+ xdp_umem_destroy(rxq->umem);
+ }
/* free pkt_tx_queue */
rte_free(rxq->pair);
dev->data->mac_addrs = NULL;
remove_xdp_program(internals);
+
+ if (internals->shared_umem) {
+ struct internal_list *list;
+
+ /* Remove ethdev from list used to track and share UMEMs */
+ list = find_internal_resource(internals);
+ if (list) {
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_REMOVE(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+ rte_free(list);
+ }
+ }
+
+ return 0;
}
static void
}
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-static inline uint64_t get_base_addr(struct rte_mempool *mp)
+static inline uint64_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
{
struct rte_mempool_memhdr *memhdr;
+ uint64_t memhdr_addr, aligned_addr;
memhdr = STAILQ_FIRST(&mp->mem_list);
- return (uint64_t)memhdr->addr & ~(getpagesize() - 1);
+ memhdr_addr = (uint64_t)memhdr->addr;
+ aligned_addr = memhdr_addr & ~(getpagesize() - 1);
+ *align = memhdr_addr - aligned_addr;
+
+ return aligned_addr;
}
static struct
-xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals __rte_unused,
+xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
struct pkt_rx_queue *rxq)
{
- struct xsk_umem_info *umem;
+ struct xsk_umem_info *umem = NULL;
int ret;
struct xsk_umem_config usr_config = {
.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS * 2,
.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
void *base_addr = NULL;
struct rte_mempool *mb_pool = rxq->mb_pool;
+ uint64_t umem_size, align = 0;
+
+ if (internals->shared_umem) {
+ umem = get_shared_umem(rxq);
+ if (umem != NULL &&
+ __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
+ umem->max_xsks) {
+ AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
+ internals->if_name, rxq->xsk_queue_idx);
+ __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
+ }
+ }
- usr_config.frame_size = rte_mempool_calc_obj_size(mb_pool->elt_size,
- mb_pool->flags,
- NULL);
- usr_config.frame_headroom = mb_pool->header_size +
- sizeof(struct rte_mbuf) +
- rte_pktmbuf_priv_size(mb_pool) +
- RTE_PKTMBUF_HEADROOM;
-
- umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
if (umem == NULL) {
- AF_XDP_LOG(ERR, "Failed to allocate umem info");
- return NULL;
- }
+ usr_config.frame_size =
+ rte_mempool_calc_obj_size(mb_pool->elt_size,
+ mb_pool->flags, NULL);
+ usr_config.frame_headroom = mb_pool->header_size +
+ sizeof(struct rte_mbuf) +
+ rte_pktmbuf_priv_size(mb_pool) +
+ RTE_PKTMBUF_HEADROOM;
+
+ umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
+ rte_socket_id());
+ if (umem == NULL) {
+ AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
+ return NULL;
+ }
- umem->mb_pool = mb_pool;
- base_addr = (void *)get_base_addr(mb_pool);
+ umem->mb_pool = mb_pool;
+ base_addr = (void *)get_base_addr(mb_pool, &align);
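+ /* base_addr is rounded down to a page boundary; add the rounding
+ * (align) back in when sizing the registered region. */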
+ umem_size = mb_pool->populated_size * usr_config.frame_size +
+ align;
- ret = xsk_umem__create(&umem->umem, base_addr,
- mb_pool->populated_size * usr_config.frame_size,
- &umem->fq, &umem->cq,
- &usr_config);
+ ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
+ &rxq->fq, &rxq->cq, &usr_config);
+ if (ret) {
+ AF_XDP_LOG(ERR, "Failed to create umem\n");
+ goto err;
+ }
+ umem->buffer = base_addr;
- if (ret) {
- AF_XDP_LOG(ERR, "Failed to create umem");
- goto err;
+ if (internals->shared_umem) {
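+ /* Bound the number of sockets that may share this UMEM by
+ * assuming each needs ETH_AF_XDP_NUM_BUFFERS buffers. */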
+ umem->max_xsks = mb_pool->populated_size /
+ ETH_AF_XDP_NUM_BUFFERS;
+ AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n",
+ mb_pool->name, umem->max_xsks);
+ }
+
+ __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
}
- umem->buffer = base_addr;
#else
static struct
ret = xsk_umem__create(&umem->umem, mz->addr,
ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
- &umem->fq, &umem->cq,
+ &rxq->fq, &rxq->cq,
&usr_config);
if (ret) {
return NULL;
}
+static int
+load_custom_xdp_prog(const char *prog_path, int if_index)
+{
+ int ret, prog_fd = -1;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+
+ ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (ret) {
+ AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
+ return ret;
+ }
+
+ /*
+ * The loaded program must provide a map of xsks named "xsks_map" so
+ * that traffic can be redirected to userspace. When an xsk socket is
+ * created, libbpf inserts it into the map.
+ */
+ map = bpf_object__find_map_by_name(obj, "xsks_map");
+ if (!map) {
+ AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
+ return -1;
+ }
+
+ /* Link the program with the given network device */
+ ret = bpf_set_link_xdp_fd(if_index, prog_fd,
+ XDP_FLAGS_UPDATE_IF_NOEXIST);
+ if (ret) {
+ AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
+ prog_fd);
+ return -1;
+ }
+
+ AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
+ prog_path, prog_fd);
+
+ return 0;
+}
+
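+/*
+ * Example (hypothetical path): point the PMD at a custom XDP program that
+ * defines an "xsks_map" map:
+ *   --vdev net_af_xdp0,iface=eth0,xdp_prog=/path/to/prog.o
+ */
+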
static int
xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
int ring_size)
cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
#endif
- ret = xsk_socket__create(&rxq->xsk, internals->if_name,
- rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
- &txq->tx, &cfg);
+ if (strnlen(internals->prog_path, PATH_MAX) &&
+ !internals->custom_prog_configured) {
+ ret = load_custom_xdp_prog(internals->prog_path,
+ internals->if_index);
+ if (ret) {
+ AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
+ internals->prog_path);
+ goto err;
+ }
+ internals->custom_prog_configured = 1;
+ }
+
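+ /* create_shared_socket() (assumed to come from compat.h) allows each
+ * socket sharing a UMEM to have its own fill and completion rings. */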
+ if (internals->shared_umem)
+ ret = create_shared_socket(&rxq->xsk, internals->if_name,
+ rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
+ &txq->tx, &rxq->fq, &rxq->cq, &cfg);
+ else
+ ret = xsk_socket__create(&rxq->xsk, internals->if_name,
+ rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
+ &txq->tx, &cfg);
+
if (ret) {
AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
goto err;
goto err;
}
#endif
- ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs);
+ ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
if (ret) {
xsk_socket__delete(rxq->xsk);
AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
return 0;
err:
- xdp_umem_destroy(rxq->umem);
+ if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0)
+ xdp_umem_destroy(rxq->umem);
return ret;
}
return 0;
}
+/** parse xdp prog argument */
+static int
+parse_prog_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ char *path = extra_args;
+
+ if (strnlen(value, PATH_MAX) == PATH_MAX) {
+ AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
+ value, PATH_MAX);
+ return -EINVAL;
+ }
+
+ if (access(value, F_OK) != 0) {
+ AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
+ value, strerror(errno));
+ return -EINVAL;
+ }
+
+ strlcpy(path, value, PATH_MAX);
+
+ return 0;
+}
+
static int
xdp_get_channels_info(const char *if_name, int *max_queues,
int *combined_queues)
channels.cmd = ETHTOOL_GCHANNELS;
ifr.ifr_data = (void *)&channels;
- strncpy(ifr.ifr_name, if_name, IFNAMSIZ);
+ strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
ret = ioctl(fd, SIOCETHTOOL, &ifr);
if (ret) {
if (errno == EOPNOTSUPP) {
static int
parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
- int *queue_cnt)
+ int *queue_cnt, int *shared_umem, char *prog_path)
{
int ret;
goto free_kvlist;
}
+ ret = rte_kvargs_process(kvlist, ETH_AF_XDP_SHARED_UMEM_ARG,
+ &parse_integer_arg, shared_umem);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG,
+ &parse_prog_arg, prog_path);
+ if (ret < 0)
+ goto free_kvlist;
+
free_kvlist:
rte_kvargs_free(kvlist);
return ret;
static struct rte_eth_dev *
init_internals(struct rte_vdev_device *dev, const char *if_name,
- int start_queue_idx, int queue_cnt)
+ int start_queue_idx, int queue_cnt, int shared_umem,
+ const char *prog_path)
{
const char *name = rte_vdev_device_name(dev);
const unsigned int numa_node = dev->device.numa_node;
internals->start_queue_idx = start_queue_idx;
internals->queue_cnt = queue_cnt;
strlcpy(internals->if_name, if_name, IFNAMSIZ);
+ strlcpy(internals->prog_path, prog_path, PATH_MAX);
+ internals->custom_prog_configured = 0;
+
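+ /* ETH_AF_XDP_SHARED_UMEM is assumed to be defined by compat.h when the
+ * kernel and libbpf in use support sharing a UMEM between sockets. */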
+#ifndef ETH_AF_XDP_SHARED_UMEM
+ if (shared_umem) {
+ AF_XDP_LOG(ERR, "Shared UMEM feature not available. "
+ "Check kernel and libbpf version\n");
+ goto err_free_internals;
+ }
+#endif
+ internals->shared_umem = shared_umem;
if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
&internals->combined_queue_cnt)) {
eth_dev->dev_ops = &ops;
eth_dev->rx_pkt_burst = eth_af_xdp_rx;
eth_dev->tx_pkt_burst = eth_af_xdp_tx;
- /* Let rte_eth_dev_close() release the port resources. */
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
char if_name[IFNAMSIZ] = {'\0'};
int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
+ int shared_umem = 0;
+ char prog_path[PATH_MAX] = {'\0'};
struct rte_eth_dev *eth_dev = NULL;
const char *name;
dev->device.numa_node = rte_socket_id();
if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
- &xsk_queue_cnt) < 0) {
+ &xsk_queue_cnt, &shared_umem, prog_path) < 0) {
AF_XDP_LOG(ERR, "Invalid kvargs value\n");
return -EINVAL;
}
}
eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
- xsk_queue_cnt);
+ xsk_queue_cnt, shared_umem, prog_path);
if (eth_dev == NULL) {
AF_XDP_LOG(ERR, "Failed to init internals\n");
return -1;
RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
"iface=<string> "
"start_queue=<int> "
- "queue_count=<int> ");
+ "queue_count=<int> "
+ "shared_umem=<int> "
+ "xdp_prog=<string> ");