/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation.
 */
9 #include <netinet/in.h>
11 #include <sys/socket.h>
12 #include <sys/ioctl.h>
13 #include <linux/if_ether.h>
14 #include <linux/if_xdp.h>
15 #include <linux/if_link.h>
16 #include <linux/ethtool.h>
17 #include <linux/sockios.h>
18 #include "af_xdp_deps.h"
21 #include <rte_ethdev.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_ethdev_vdev.h>
24 #include <rte_kvargs.h>
25 #include <rte_bus_vdev.h>
26 #include <rte_string_fns.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
31 #include <rte_ether.h>
32 #include <rte_lcore.h>
34 #include <rte_memory.h>
35 #include <rte_memzone.h>
36 #include <rte_mempool.h>
38 #include <rte_malloc.h>
40 #include <rte_spinlock.h>
57 RTE_LOG_REGISTER(af_xdp_logtype, pmd.net.af_xdp, NOTICE);
59 #define AF_XDP_LOG(level, fmt, args...) \
60 rte_log(RTE_LOG_ ## level, af_xdp_logtype, \
61 "%s(): " fmt, __func__, ##args)
63 #define ETH_AF_XDP_FRAME_SIZE 2048
64 #define ETH_AF_XDP_NUM_BUFFERS 4096
65 #define ETH_AF_XDP_DFLT_NUM_DESCS XSK_RING_CONS__DEFAULT_NUM_DESCS
66 #define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0
67 #define ETH_AF_XDP_DFLT_QUEUE_COUNT 1
69 #define ETH_AF_XDP_RX_BATCH_SIZE 32
70 #define ETH_AF_XDP_TX_BATCH_SIZE 32
73 struct xsk_umem_info {
74 struct xsk_umem *umem;
75 struct rte_ring *buf_ring;
76 const struct rte_memzone *mz;
77 struct rte_mempool *mb_pool;
90 struct xsk_ring_cons rx;
91 struct xsk_umem_info *umem;
92 struct xsk_socket *xsk;
93 struct rte_mempool *mb_pool;
95 struct rx_stats stats;
97 struct xsk_ring_prod fq;
98 struct xsk_ring_cons cq;
100 struct pkt_tx_queue *pair;
101 struct pollfd fds[1];
111 struct pkt_tx_queue {
112 struct xsk_ring_prod tx;
113 struct xsk_umem_info *umem;
115 struct tx_stats stats;
117 struct pkt_rx_queue *pair;
121 struct pmd_internals {
123 char if_name[IFNAMSIZ];
127 int combined_queue_cnt;
129 char prog_path[PATH_MAX];
130 bool custom_prog_configured;
132 struct rte_ether_addr eth_addr;
134 struct pkt_rx_queue *rx_queues;
135 struct pkt_tx_queue *tx_queues;
138 #define ETH_AF_XDP_IFACE_ARG "iface"
139 #define ETH_AF_XDP_START_QUEUE_ARG "start_queue"
140 #define ETH_AF_XDP_QUEUE_COUNT_ARG "queue_count"
141 #define ETH_AF_XDP_SHARED_UMEM_ARG "shared_umem"
142 #define ETH_AF_XDP_PROG_ARG "xdp_prog"
144 static const char * const valid_arguments[] = {
145 ETH_AF_XDP_IFACE_ARG,
146 ETH_AF_XDP_START_QUEUE_ARG,
147 ETH_AF_XDP_QUEUE_COUNT_ARG,
148 ETH_AF_XDP_SHARED_UMEM_ARG,
153 static const struct rte_eth_link pmd_link = {
154 .link_speed = ETH_SPEED_NUM_10G,
155 .link_duplex = ETH_LINK_FULL_DUPLEX,
156 .link_status = ETH_LINK_DOWN,
157 .link_autoneg = ETH_LINK_AUTONEG
160 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
161 struct internal_list {
162 TAILQ_ENTRY(internal_list) next;
163 struct rte_eth_dev *eth_dev;
166 TAILQ_HEAD(internal_list_head, internal_list);
167 static struct internal_list_head internal_list =
168 TAILQ_HEAD_INITIALIZER(internal_list);
170 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
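/*
 * Editorial example (assumed invocation, in the style of the DPDK AF_XDP
 * guide): two ports can share one UMEM by being backed by the same mempool
 * and passing shared_umem=1 on both vdevs, e.g.
 *   --vdev net_af_xdp0,iface=ens786f1,shared_umem=1 \
 *   --vdev net_af_xdp1,iface=ens786f2,shared_umem=1
 * The list above tracks the participating ethdevs so that a later port can
 * find and reuse an already-registered UMEM.
 */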
172 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
174 reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
175 struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
180 if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
181 for (i = 0; i < reserve_size; i++)
182 rte_pktmbuf_free(bufs[i]);
183 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
187 for (i = 0; i < reserve_size; i++) {
191 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
192 addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
193 umem->mb_pool->header_size;
197 xsk_ring_prod__submit(fq, reserve_size);
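/*
 * Editorial note: in this zero-copy variant the fill descriptor carries the
 * mbuf's byte offset within the UMEM area, i.e.
 *     addr = (uintptr_t)mbuf - (uintptr_t)umem->buffer - header_size
 * so the kernel places each received frame straight into mempool memory and
 * the same object can later be handed to the application as an rte_mbuf.
 */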
203 reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
204 struct rte_mbuf **bufs __rte_unused,
205 struct xsk_ring_prod *fq)
207 void *addrs[reserve_size];
211 if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
213 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
217 if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
218 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
219 rte_ring_enqueue_bulk(umem->buf_ring, addrs,
224 for (i = 0; i < reserve_size; i++) {
227 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
228 *fq_addr = (uint64_t)addrs[i];
231 xsk_ring_prod__submit(fq, reserve_size);
238 reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
239 struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
241 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
242 return reserve_fill_queue_zc(umem, reserve_size, bufs, fq);
244 return reserve_fill_queue_cp(umem, reserve_size, bufs, fq);
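/*
 * Editorial note: the zero-copy (_zc) paths are compiled in only when the
 * libbpf/kernel headers expose XDP_UMEM_UNALIGNED_CHUNK_FLAG; otherwise the
 * copy (_cp) paths, which stage packets through a memzone-backed UMEM and an
 * rte_ring of free frame addresses, are used throughout this file.
 */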
248 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
250 af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
252 struct pkt_rx_queue *rxq = queue;
253 struct xsk_ring_cons *rx = &rxq->rx;
254 struct xsk_ring_prod *fq = &rxq->fq;
255 struct xsk_umem_info *umem = rxq->umem;
257 unsigned long rx_bytes = 0;
259 struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
261 nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
264 #if defined(XDP_USE_NEED_WAKEUP)
265 if (xsk_ring_prod__needs_wakeup(fq))
266 (void)poll(rxq->fds, 1, 1000);
272 /* allocate bufs for fill queue replenishment after rx */
273 if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
275 "Failed to get enough buffers for fq.\n");
/* Roll back cached_cons, which was advanced by
 * xsk_ring_cons__peek() above.
 */
279 rx->cached_cons -= nb_pkts;
283 for (i = 0; i < nb_pkts; i++) {
284 const struct xdp_desc *desc;
289 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
293 offset = xsk_umem__extract_offset(addr);
294 addr = xsk_umem__extract_addr(addr);
296 bufs[i] = (struct rte_mbuf *)
297 xsk_umem__get_data(umem->buffer, addr +
298 umem->mb_pool->header_size);
299 bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
300 rte_pktmbuf_priv_size(umem->mb_pool) -
301 umem->mb_pool->header_size;
303 rte_pktmbuf_pkt_len(bufs[i]) = len;
304 rte_pktmbuf_data_len(bufs[i]) = len;
308 xsk_ring_cons__release(rx, nb_pkts);
309 (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
312 rxq->stats.rx_pkts += nb_pkts;
313 rxq->stats.rx_bytes += rx_bytes;
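/*
 * Editorial note: on the zero-copy RX path the descriptors returned by the
 * kernel already point at mempool objects, so the loop above only fixes up
 * data_off and the lengths before handing the mbufs to the caller; freshly
 * allocated mbufs (fq_bufs) are then pushed to the fill queue to replace them.
 */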
319 af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
321 struct pkt_rx_queue *rxq = queue;
322 struct xsk_ring_cons *rx = &rxq->rx;
323 struct xsk_umem_info *umem = rxq->umem;
324 struct xsk_ring_prod *fq = &rxq->fq;
326 unsigned long rx_bytes = 0;
328 uint32_t free_thresh = fq->size >> 1;
329 struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
331 if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
332 (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE,
335 nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
337 #if defined(XDP_USE_NEED_WAKEUP)
338 if (xsk_ring_prod__needs_wakeup(fq))
339 (void)poll(rxq->fds, 1, 1000);
344 if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
/* Roll back cached_cons, which was advanced by
 * xsk_ring_cons__peek() above.
 */
348 rx->cached_cons -= nb_pkts;
352 for (i = 0; i < nb_pkts; i++) {
353 const struct xdp_desc *desc;
358 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
361 pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);
363 rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
364 rte_ring_enqueue(umem->buf_ring, (void *)addr);
365 rte_pktmbuf_pkt_len(mbufs[i]) = len;
366 rte_pktmbuf_data_len(mbufs[i]) = len;
371 xsk_ring_cons__release(rx, nb_pkts);
374 rxq->stats.rx_pkts += nb_pkts;
375 rxq->stats.rx_bytes += rx_bytes;
382 eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
384 nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
386 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
387 return af_xdp_rx_zc(queue, bufs, nb_pkts);
389 return af_xdp_rx_cp(queue, bufs, nb_pkts);
394 pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
399 n = xsk_ring_cons__peek(cq, size, &idx_cq);
401 for (i = 0; i < n; i++) {
403 addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
404 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
405 addr = xsk_umem__extract_addr(addr);
406 rte_pktmbuf_free((struct rte_mbuf *)
407 xsk_umem__get_data(umem->buffer,
408 addr + umem->mb_pool->header_size));
410 rte_ring_enqueue(umem->buf_ring, (void *)addr);
414 xsk_ring_cons__release(cq, n);
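/*
 * Editorial note: pull_umem_cq() drains up to 'size' completions. With the
 * unaligned-chunk (zero-copy) build each completed address is converted back
 * to its mbuf and freed to the mempool; with the copy build the raw frame
 * address is simply returned to the UMEM's buf_ring for reuse.
 */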
418 kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
420 struct xsk_umem_info *umem = txq->umem;
422 pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
424 #if defined(XDP_USE_NEED_WAKEUP)
425 if (xsk_ring_prod__needs_wakeup(&txq->tx))
427 while (send(xsk_socket__fd(txq->pair->xsk), NULL,
428 0, MSG_DONTWAIT) < 0) {
/* something unexpected */
430 if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
433 /* pull from completion queue to leave more space */
436 XSK_RING_CONS__DEFAULT_NUM_DESCS,
441 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
443 af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
445 struct pkt_tx_queue *txq = queue;
446 struct xsk_umem_info *umem = txq->umem;
447 struct rte_mbuf *mbuf;
448 unsigned long tx_bytes = 0;
452 struct xdp_desc *desc;
453 uint64_t addr, offset;
454 struct xsk_ring_cons *cq = &txq->pair->cq;
455 uint32_t free_thresh = cq->size >> 1;
457 if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
458 pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
460 for (i = 0; i < nb_pkts; i++) {
463 if (mbuf->pool == umem->mb_pool) {
464 if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
466 if (!xsk_ring_prod__reserve(&txq->tx, 1,
470 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
471 desc->len = mbuf->pkt_len;
472 addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
473 umem->mb_pool->header_size;
offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
(uint64_t)mbuf +
umem->mb_pool->header_size;
477 offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
478 desc->addr = addr | offset;
481 struct rte_mbuf *local_mbuf =
482 rte_pktmbuf_alloc(umem->mb_pool);
485 if (local_mbuf == NULL)
488 if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
489 rte_pktmbuf_free(local_mbuf);
494 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
495 desc->len = mbuf->pkt_len;
497 addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
498 umem->mb_pool->header_size;
499 offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
500 (uint64_t)local_mbuf +
501 umem->mb_pool->header_size;
502 pkt = xsk_umem__get_data(umem->buffer, addr + offset);
503 offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
504 desc->addr = addr | offset;
505 rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
507 rte_pktmbuf_free(mbuf);
511 tx_bytes += mbuf->pkt_len;
517 xsk_ring_prod__submit(&txq->tx, count);
519 txq->stats.tx_pkts += count;
520 txq->stats.tx_bytes += tx_bytes;
521 txq->stats.tx_dropped += nb_pkts - count;
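/*
 * Editorial note: af_xdp_tx_zc() distinguishes two cases. If the mbuf already
 * lives in the UMEM's mempool it is described in place (true zero copy); if it
 * comes from a foreign mempool, a local mbuf is allocated from the UMEM pool
 * and the payload is copied into it before the descriptor is submitted.
 */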
527 af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
529 struct pkt_tx_queue *txq = queue;
530 struct xsk_umem_info *umem = txq->umem;
531 struct rte_mbuf *mbuf;
532 void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
533 unsigned long tx_bytes = 0;
536 struct xsk_ring_cons *cq = &txq->pair->cq;
538 nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
540 pull_umem_cq(umem, nb_pkts, cq);
542 nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
547 if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
549 rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
553 for (i = 0; i < nb_pkts; i++) {
554 struct xdp_desc *desc;
557 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
559 desc->len = mbuf->pkt_len;
561 desc->addr = (uint64_t)addrs[i];
562 pkt = xsk_umem__get_data(umem->mz->addr,
564 rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
565 tx_bytes += mbuf->pkt_len;
566 rte_pktmbuf_free(mbuf);
569 xsk_ring_prod__submit(&txq->tx, nb_pkts);
573 txq->stats.tx_pkts += nb_pkts;
574 txq->stats.tx_bytes += tx_bytes;
581 eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
583 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
584 return af_xdp_tx_zc(queue, bufs, nb_pkts);
586 return af_xdp_tx_cp(queue, bufs, nb_pkts);
591 eth_dev_start(struct rte_eth_dev *dev)
593 dev->data->dev_link.link_status = ETH_LINK_UP;
598 /* This function gets called when the current port gets stopped. */
600 eth_dev_stop(struct rte_eth_dev *dev)
602 dev->data->dev_link.link_status = ETH_LINK_DOWN;
606 /* Find ethdev in list */
607 static inline struct internal_list *
608 find_internal_resource(struct pmd_internals *port_int)
611 struct internal_list *list = NULL;
613 if (port_int == NULL)
616 pthread_mutex_lock(&internal_list_lock);
618 TAILQ_FOREACH(list, &internal_list, next) {
619 struct pmd_internals *list_int =
620 list->eth_dev->data->dev_private;
621 if (list_int == port_int) {
627 pthread_mutex_unlock(&internal_list_lock);
635 /* Check if the netdev,qid context already exists */
637 ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
638 struct pkt_rx_queue *list_rxq, const char *list_ifname)
642 if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
643 !strncmp(ifname, list_ifname, IFNAMSIZ)) {
644 AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
645 ifname, rxq->xsk_queue_idx);
652 /* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
654 get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
655 struct xsk_umem_info **umem)
657 struct internal_list *list;
658 struct pmd_internals *internals;
660 struct rte_mempool *mb_pool = rxq->mb_pool;
665 pthread_mutex_lock(&internal_list_lock);
667 TAILQ_FOREACH(list, &internal_list, next) {
668 internals = list->eth_dev->data->dev_private;
669 for (i = 0; i < internals->queue_cnt; i++) {
670 struct pkt_rx_queue *list_rxq =
671 &internals->rx_queues[i];
674 if (mb_pool == internals->rx_queues[i].mb_pool) {
675 if (ctx_exists(rxq, ifname, list_rxq,
676 internals->if_name)) {
681 &internals->rx_queues[i].umem->refcnt,
683 *umem = internals->rx_queues[i].umem;
691 pthread_mutex_unlock(&internal_list_lock);
697 eth_dev_configure(struct rte_eth_dev *dev)
699 struct pmd_internals *internal = dev->data->dev_private;
701 /* rx/tx must be paired */
702 if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
705 if (internal->shared_umem) {
706 struct internal_list *list = NULL;
707 const char *name = dev->device->name;
709 /* Ensure PMD is not already inserted into the list */
710 list = find_internal_resource(internal);
714 list = rte_zmalloc_socket(name, sizeof(*list), 0,
715 dev->device->numa_node);
720 pthread_mutex_lock(&internal_list_lock);
721 TAILQ_INSERT_TAIL(&internal_list, list, next);
722 pthread_mutex_unlock(&internal_list_lock);
729 eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
731 struct pmd_internals *internals = dev->data->dev_private;
733 dev_info->if_index = internals->if_index;
734 dev_info->max_mac_addrs = 1;
735 dev_info->max_rx_pktlen = ETH_FRAME_LEN;
736 dev_info->max_rx_queues = internals->queue_cnt;
737 dev_info->max_tx_queues = internals->queue_cnt;
739 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
740 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
741 dev_info->max_mtu = getpagesize() -
742 sizeof(struct rte_mempool_objhdr) -
743 sizeof(struct rte_mbuf) -
744 RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
746 dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
749 dev_info->default_rxportconf.nb_queues = 1;
750 dev_info->default_txportconf.nb_queues = 1;
751 dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
752 dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
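/*
 * Editorial note: with the zero-copy build the maximum MTU is bounded by what
 * fits in a single page once the mempool object header, the rte_mbuf
 * structure, RTE_PKTMBUF_HEADROOM and XDP_PACKET_HEADROOM are subtracted; the
 * copy build is limited by the fixed 2 KB UMEM frame minus XDP_PACKET_HEADROOM.
 */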
758 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
760 struct pmd_internals *internals = dev->data->dev_private;
761 struct xdp_statistics xdp_stats;
762 struct pkt_rx_queue *rxq;
763 struct pkt_tx_queue *txq;
767 for (i = 0; i < dev->data->nb_rx_queues; i++) {
768 optlen = sizeof(struct xdp_statistics);
769 rxq = &internals->rx_queues[i];
771 stats->q_ipackets[i] = rxq->stats.rx_pkts;
772 stats->q_ibytes[i] = rxq->stats.rx_bytes;
774 stats->q_opackets[i] = txq->stats.tx_pkts;
775 stats->q_obytes[i] = txq->stats.tx_bytes;
777 stats->ipackets += stats->q_ipackets[i];
778 stats->ibytes += stats->q_ibytes[i];
779 stats->imissed += rxq->stats.rx_dropped;
780 stats->oerrors += txq->stats.tx_dropped;
781 ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
782 XDP_STATISTICS, &xdp_stats, &optlen);
784 AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
787 stats->imissed += xdp_stats.rx_dropped;
789 stats->opackets += stats->q_opackets[i];
790 stats->obytes += stats->q_obytes[i];
797 eth_stats_reset(struct rte_eth_dev *dev)
799 struct pmd_internals *internals = dev->data->dev_private;
802 for (i = 0; i < internals->queue_cnt; i++) {
803 memset(&internals->rx_queues[i].stats, 0,
804 sizeof(struct rx_stats));
805 memset(&internals->tx_queues[i].stats, 0,
806 sizeof(struct tx_stats));
813 remove_xdp_program(struct pmd_internals *internals)
815 uint32_t curr_prog_id = 0;
817 if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
818 XDP_FLAGS_UPDATE_IF_NOEXIST)) {
819 AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
822 bpf_set_link_xdp_fd(internals->if_index, -1,
823 XDP_FLAGS_UPDATE_IF_NOEXIST);
827 xdp_umem_destroy(struct xsk_umem_info *umem)
829 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
830 umem->mb_pool = NULL;
832 rte_memzone_free(umem->mz);
835 rte_ring_free(umem->buf_ring);
836 umem->buf_ring = NULL;
843 eth_dev_close(struct rte_eth_dev *dev)
845 struct pmd_internals *internals = dev->data->dev_private;
846 struct pkt_rx_queue *rxq;
849 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
852 AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
855 for (i = 0; i < internals->queue_cnt; i++) {
856 rxq = &internals->rx_queues[i];
857 if (rxq->umem == NULL)
859 xsk_socket__delete(rxq->xsk);
861 if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE)
863 (void)xsk_umem__delete(rxq->umem->umem);
864 xdp_umem_destroy(rxq->umem);
867 /* free pkt_tx_queue */
/*
 * The MAC address is not allocated dynamically; setting the pointer to NULL
 * here prevents rte_eth_dev_release_port() from attempting to free it.
 */
876 dev->data->mac_addrs = NULL;
878 remove_xdp_program(internals);
880 if (internals->shared_umem) {
881 struct internal_list *list;
883 /* Remove ethdev from list used to track and share UMEMs */
884 list = find_internal_resource(internals);
886 pthread_mutex_lock(&internal_list_lock);
887 TAILQ_REMOVE(&internal_list, list, next);
888 pthread_mutex_unlock(&internal_list_lock);
897 eth_queue_release(void *q __rte_unused)
902 eth_link_update(struct rte_eth_dev *dev __rte_unused,
903 int wait_to_complete __rte_unused)
908 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
909 static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
911 struct rte_mempool_memhdr *memhdr;
912 uintptr_t memhdr_addr, aligned_addr;
914 memhdr = STAILQ_FIRST(&mp->mem_list);
915 memhdr_addr = (uintptr_t)memhdr->addr;
916 aligned_addr = memhdr_addr & ~(getpagesize() - 1);
917 *align = memhdr_addr - aligned_addr;
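/*
 * Editorial note: xsk_umem__create() expects a page-aligned base address, so
 * the mempool's first memhdr address is rounded down to a page boundary here
 * and the number of bytes dropped is reported through *align so the caller
 * can pad the UMEM size accordingly.
 */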
923 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
924 struct pkt_rx_queue *rxq)
926 struct xsk_umem_info *umem = NULL;
928 struct xsk_umem_config usr_config = {
929 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS * 2,
930 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
931 .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
932 void *base_addr = NULL;
933 struct rte_mempool *mb_pool = rxq->mb_pool;
934 uint64_t umem_size, align = 0;
936 if (internals->shared_umem) {
937 if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
941 __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
943 AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
944 internals->if_name, rxq->xsk_queue_idx);
945 __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
950 usr_config.frame_size =
951 rte_mempool_calc_obj_size(mb_pool->elt_size,
952 mb_pool->flags, NULL);
953 usr_config.frame_headroom = mb_pool->header_size +
954 sizeof(struct rte_mbuf) +
955 rte_pktmbuf_priv_size(mb_pool) +
956 RTE_PKTMBUF_HEADROOM;
958 umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
961 AF_XDP_LOG(ERR, "Failed to allocate umem info");
965 umem->mb_pool = mb_pool;
966 base_addr = (void *)get_base_addr(mb_pool, &align);
umem_size = (uint64_t)mb_pool->populated_size *
(uint64_t)usr_config.frame_size +
align;
971 ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
972 &rxq->fq, &rxq->cq, &usr_config);
974 AF_XDP_LOG(ERR, "Failed to create umem");
977 umem->buffer = base_addr;
979 if (internals->shared_umem) {
980 umem->max_xsks = mb_pool->populated_size /
981 ETH_AF_XDP_NUM_BUFFERS;
982 AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n",
983 mb_pool->name, umem->max_xsks);
986 __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
991 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
992 struct pkt_rx_queue *rxq)
994 struct xsk_umem_info *umem;
995 const struct rte_memzone *mz;
996 struct xsk_umem_config usr_config = {
997 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
998 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
999 .frame_size = ETH_AF_XDP_FRAME_SIZE,
1000 .frame_headroom = 0 };
1001 char ring_name[RTE_RING_NAMESIZE];
1002 char mz_name[RTE_MEMZONE_NAMESIZE];
1006 umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
1008 AF_XDP_LOG(ERR, "Failed to allocate umem info");
1012 snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
1013 internals->if_name, rxq->xsk_queue_idx);
1014 umem->buf_ring = rte_ring_create(ring_name,
1015 ETH_AF_XDP_NUM_BUFFERS,
1018 if (umem->buf_ring == NULL) {
1019 AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
1023 for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
1024 rte_ring_enqueue(umem->buf_ring,
1025 (void *)(i * ETH_AF_XDP_FRAME_SIZE));
1027 snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
1028 internals->if_name, rxq->xsk_queue_idx);
1029 mz = rte_memzone_reserve_aligned(mz_name,
1030 ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1031 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
1034 AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
1038 ret = xsk_umem__create(&umem->umem, mz->addr,
1039 ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1044 AF_XDP_LOG(ERR, "Failed to create umem");
1053 xdp_umem_destroy(umem);
1058 load_custom_xdp_prog(const char *prog_path, int if_index)
1060 int ret, prog_fd = -1;
1061 struct bpf_object *obj;
1062 struct bpf_map *map;
1064 ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
1066 AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
/*
 * The loaded program must provide a map of AF_XDP sockets (xsks) so that
 * traffic can be redirected to userspace. When an xsk is created, libbpf
 * inserts it into this map.
 */
1075 map = bpf_object__find_map_by_name(obj, "xsks_map");
1077 AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
1081 /* Link the program with the given network device */
1082 ret = bpf_set_link_xdp_fd(if_index, prog_fd,
1083 XDP_FLAGS_UPDATE_IF_NOEXIST);
1085 AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
1090 AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
1091 prog_path, prog_fd);
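/*
 * Editorial sketch (assumed example, not shipped with this driver): a minimal
 * custom program satisfying the xsks_map requirement, in the style of the
 * kernel's xdpsock sample, could look like:
 *
 *   struct bpf_map_def SEC("maps") xsks_map = {
 *           .type = BPF_MAP_TYPE_XSKMAP,
 *           .key_size = sizeof(int),
 *           .value_size = sizeof(int),
 *           .max_entries = 64,
 *   };
 *
 *   SEC("xdp") int xdp_sock_prog(struct xdp_md *ctx)
 *   {
 *           return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
 *   }
 *
 * Compiled with clang -target bpf and passed via the xdp_prog devarg, it
 * redirects each queue's traffic to the xsk bound to that queue.
 */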
1097 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
1100 struct xsk_socket_config cfg;
1101 struct pkt_tx_queue *txq = rxq->pair;
1103 int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS;
1104 struct rte_mbuf *fq_bufs[reserve_size];
1106 rxq->umem = xdp_umem_configure(internals, rxq);
1107 if (rxq->umem == NULL)
1109 txq->umem = rxq->umem;
1111 cfg.rx_size = ring_size;
1112 cfg.tx_size = ring_size;
1113 cfg.libbpf_flags = 0;
1114 cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
1117 #if defined(XDP_USE_NEED_WAKEUP)
1118 cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
1121 if (strnlen(internals->prog_path, PATH_MAX) &&
1122 !internals->custom_prog_configured) {
1123 ret = load_custom_xdp_prog(internals->prog_path,
1124 internals->if_index);
1126 AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
1127 internals->prog_path);
1130 internals->custom_prog_configured = 1;
1133 if (internals->shared_umem)
1134 ret = create_shared_socket(&rxq->xsk, internals->if_name,
1135 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1136 &txq->tx, &rxq->fq, &rxq->cq, &cfg);
1138 ret = xsk_socket__create(&rxq->xsk, internals->if_name,
1139 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1143 AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
1147 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1148 if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) {
1149 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
1153 ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
1155 xsk_socket__delete(rxq->xsk);
1156 AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
1163 if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0)
1164 xdp_umem_destroy(rxq->umem);
1170 eth_rx_queue_setup(struct rte_eth_dev *dev,
1171 uint16_t rx_queue_id,
1172 uint16_t nb_rx_desc,
1173 unsigned int socket_id __rte_unused,
1174 const struct rte_eth_rxconf *rx_conf __rte_unused,
1175 struct rte_mempool *mb_pool)
1177 struct pmd_internals *internals = dev->data->dev_private;
1178 struct pkt_rx_queue *rxq;
1181 rxq = &internals->rx_queues[rx_queue_id];
1183 AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
1184 rx_queue_id, rxq->xsk_queue_idx);
1186 #ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
1187 uint32_t buf_size, data_size;
1189 /* Now get the space available for data in the mbuf */
1190 buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1191 RTE_PKTMBUF_HEADROOM;
1192 data_size = ETH_AF_XDP_FRAME_SIZE;
1194 if (data_size > buf_size) {
1195 AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
1196 dev->device->name, data_size, buf_size);
1202 rxq->mb_pool = mb_pool;
1204 if (xsk_configure(internals, rxq, nb_rx_desc)) {
1205 AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
1210 rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
1211 rxq->fds[0].events = POLLIN;
1213 dev->data->rx_queues[rx_queue_id] = rxq;
1221 eth_tx_queue_setup(struct rte_eth_dev *dev,
1222 uint16_t tx_queue_id,
1223 uint16_t nb_tx_desc __rte_unused,
1224 unsigned int socket_id __rte_unused,
1225 const struct rte_eth_txconf *tx_conf __rte_unused)
1227 struct pmd_internals *internals = dev->data->dev_private;
1228 struct pkt_tx_queue *txq;
1230 txq = &internals->tx_queues[tx_queue_id];
1232 dev->data->tx_queues[tx_queue_id] = txq;
1237 eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1239 struct pmd_internals *internals = dev->data->dev_private;
1240 struct ifreq ifr = { .ifr_mtu = mtu };
1244 s = socket(PF_INET, SOCK_DGRAM, 0);
1248 strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
1249 ret = ioctl(s, SIOCSIFMTU, &ifr);
1252 return (ret < 0) ? -errno : 0;
1256 eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
1262 s = socket(PF_INET, SOCK_DGRAM, 0);
1266 strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1267 if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
1271 ifr.ifr_flags &= mask;
1272 ifr.ifr_flags |= flags;
1273 if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
1283 eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
1285 struct pmd_internals *internals = dev->data->dev_private;
1287 return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
1291 eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
1293 struct pmd_internals *internals = dev->data->dev_private;
1295 return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
1298 static const struct eth_dev_ops ops = {
1299 .dev_start = eth_dev_start,
1300 .dev_stop = eth_dev_stop,
1301 .dev_close = eth_dev_close,
1302 .dev_configure = eth_dev_configure,
1303 .dev_infos_get = eth_dev_info,
1304 .mtu_set = eth_dev_mtu_set,
1305 .promiscuous_enable = eth_dev_promiscuous_enable,
1306 .promiscuous_disable = eth_dev_promiscuous_disable,
1307 .rx_queue_setup = eth_rx_queue_setup,
1308 .tx_queue_setup = eth_tx_queue_setup,
1309 .rx_queue_release = eth_queue_release,
1310 .tx_queue_release = eth_queue_release,
1311 .link_update = eth_link_update,
1312 .stats_get = eth_stats_get,
1313 .stats_reset = eth_stats_reset,
/** parse an integer argument */
1318 parse_integer_arg(const char *key __rte_unused,
1319 const char *value, void *extra_args)
1321 int *i = (int *)extra_args;
1324 *i = strtol(value, &end, 10);
1326 AF_XDP_LOG(ERR, "Argument has to be positive.\n");
1333 /** parse name argument */
1335 parse_name_arg(const char *key __rte_unused,
1336 const char *value, void *extra_args)
1338 char *name = extra_args;
1340 if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
1341 AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
1346 strlcpy(name, value, IFNAMSIZ);
1351 /** parse xdp prog argument */
1353 parse_prog_arg(const char *key __rte_unused,
1354 const char *value, void *extra_args)
1356 char *path = extra_args;
1358 if (strnlen(value, PATH_MAX) == PATH_MAX) {
1359 AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
1364 if (access(value, F_OK) != 0) {
1365 AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
1366 value, strerror(errno));
1370 strlcpy(path, value, PATH_MAX);
1376 xdp_get_channels_info(const char *if_name, int *max_queues,
1377 int *combined_queues)
1379 struct ethtool_channels channels;
1383 fd = socket(AF_INET, SOCK_DGRAM, 0);
1387 channels.cmd = ETHTOOL_GCHANNELS;
1388 ifr.ifr_data = (void *)&channels;
1389 strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1390 ret = ioctl(fd, SIOCETHTOOL, &ifr);
1392 if (errno == EOPNOTSUPP) {
1400 if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
1401 /* If the device says it has no channels, then all traffic
1402 * is sent to a single stream, so max queues = 1.
*max_queues = 1;
*combined_queues = 1;
1407 *max_queues = channels.max_combined;
1408 *combined_queues = channels.combined_count;
1417 parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
1418 int *queue_cnt, int *shared_umem, char *prog_path)
1422 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
1423 &parse_name_arg, if_name);
1427 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
1428 &parse_integer_arg, start_queue);
1432 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
1433 &parse_integer_arg, queue_cnt);
1434 if (ret < 0 || *queue_cnt <= 0) {
1439 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_SHARED_UMEM_ARG,
1440 &parse_integer_arg, shared_umem);
1444 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG,
1445 &parse_prog_arg, prog_path);
1450 rte_kvargs_free(kvlist);
1455 get_iface_info(const char *if_name,
1456 struct rte_ether_addr *eth_addr,
1460 int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
1465 strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1466 if (ioctl(sock, SIOCGIFINDEX, &ifr))
1469 *if_index = ifr.ifr_ifindex;
1471 if (ioctl(sock, SIOCGIFHWADDR, &ifr))
1474 rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
1484 static struct rte_eth_dev *
1485 init_internals(struct rte_vdev_device *dev, const char *if_name,
1486 int start_queue_idx, int queue_cnt, int shared_umem,
1487 const char *prog_path)
1489 const char *name = rte_vdev_device_name(dev);
1490 const unsigned int numa_node = dev->device.numa_node;
1491 struct pmd_internals *internals;
1492 struct rte_eth_dev *eth_dev;
1496 internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
1497 if (internals == NULL)
1500 internals->start_queue_idx = start_queue_idx;
1501 internals->queue_cnt = queue_cnt;
1502 strlcpy(internals->if_name, if_name, IFNAMSIZ);
1503 strlcpy(internals->prog_path, prog_path, PATH_MAX);
1504 internals->custom_prog_configured = 0;
1506 #ifndef ETH_AF_XDP_SHARED_UMEM
1508 AF_XDP_LOG(ERR, "Shared UMEM feature not available. "
1509 "Check kernel and libbpf version\n");
1510 goto err_free_internals;
1513 internals->shared_umem = shared_umem;
1515 if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
1516 &internals->combined_queue_cnt)) {
1517 AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
1519 goto err_free_internals;
1522 if (queue_cnt > internals->combined_queue_cnt) {
1523 AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
1524 queue_cnt, internals->combined_queue_cnt);
1525 goto err_free_internals;
1528 internals->rx_queues = rte_zmalloc_socket(NULL,
1529 sizeof(struct pkt_rx_queue) * queue_cnt,
1531 if (internals->rx_queues == NULL) {
1532 AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
1533 goto err_free_internals;
1536 internals->tx_queues = rte_zmalloc_socket(NULL,
1537 sizeof(struct pkt_tx_queue) * queue_cnt,
1539 if (internals->tx_queues == NULL) {
1540 AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
1543 for (i = 0; i < queue_cnt; i++) {
1544 internals->tx_queues[i].pair = &internals->rx_queues[i];
1545 internals->rx_queues[i].pair = &internals->tx_queues[i];
1546 internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
1547 internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
1550 ret = get_iface_info(if_name, &internals->eth_addr,
1551 &internals->if_index);
1555 eth_dev = rte_eth_vdev_allocate(dev, 0);
1556 if (eth_dev == NULL)
1559 eth_dev->data->dev_private = internals;
1560 eth_dev->data->dev_link = pmd_link;
1561 eth_dev->data->mac_addrs = &internals->eth_addr;
1562 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1563 eth_dev->dev_ops = &ops;
1564 eth_dev->rx_pkt_burst = eth_af_xdp_rx;
1565 eth_dev->tx_pkt_burst = eth_af_xdp_tx;
1567 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1568 AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
1574 rte_free(internals->tx_queues);
1576 rte_free(internals->rx_queues);
1578 rte_free(internals);
1583 rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
1585 struct rte_kvargs *kvlist;
1586 char if_name[IFNAMSIZ] = {'\0'};
1587 int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
1588 int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
1589 int shared_umem = 0;
1590 char prog_path[PATH_MAX] = {'\0'};
1591 struct rte_eth_dev *eth_dev = NULL;
1594 AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
1595 rte_vdev_device_name(dev));
1597 name = rte_vdev_device_name(dev);
1598 if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
1599 strlen(rte_vdev_device_args(dev)) == 0) {
1600 eth_dev = rte_eth_dev_attach_secondary(name);
1601 if (eth_dev == NULL) {
1602 AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
1605 eth_dev->dev_ops = &ops;
1606 rte_eth_dev_probing_finish(eth_dev);
1610 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1611 if (kvlist == NULL) {
1612 AF_XDP_LOG(ERR, "Invalid kvargs key\n");
1616 if (dev->device.numa_node == SOCKET_ID_ANY)
1617 dev->device.numa_node = rte_socket_id();
1619 if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
1620 &xsk_queue_cnt, &shared_umem, prog_path) < 0) {
1621 AF_XDP_LOG(ERR, "Invalid kvargs value\n");
1625 if (strlen(if_name) == 0) {
1626 AF_XDP_LOG(ERR, "Network interface must be specified\n");
1630 eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
1631 xsk_queue_cnt, shared_umem, prog_path);
1632 if (eth_dev == NULL) {
1633 AF_XDP_LOG(ERR, "Failed to init internals\n");
1637 rte_eth_dev_probing_finish(eth_dev);
1643 rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
1645 struct rte_eth_dev *eth_dev = NULL;
1647 AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
1653 /* find the ethdev entry */
1654 eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1655 if (eth_dev == NULL)
1658 eth_dev_close(eth_dev);
1659 rte_eth_dev_release_port(eth_dev);
1665 static struct rte_vdev_driver pmd_af_xdp_drv = {
1666 .probe = rte_pmd_af_xdp_probe,
1667 .remove = rte_pmd_af_xdp_remove,
1670 RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
1671 RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
1673 "start_queue=<int> "
1674 "queue_count=<int> "
1675 "shared_umem=<int> "
1676 "xdp_prog=<string> ");