1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Intel Corporation.
8 #include <netinet/in.h>
10 #include <sys/socket.h>
11 #include <sys/ioctl.h>
12 #include <linux/if_ether.h>
13 #include <linux/if_xdp.h>
14 #include <linux/if_link.h>
15 #include <linux/ethtool.h>
16 #include <linux/sockios.h>
17 #include "af_xdp_deps.h"
21 #include <rte_ethdev.h>
22 #include <ethdev_driver.h>
23 #include <ethdev_vdev.h>
24 #include <rte_kvargs.h>
25 #include <rte_bus_vdev.h>
26 #include <rte_string_fns.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
31 #include <rte_ether.h>
32 #include <rte_lcore.h>
34 #include <rte_memory.h>
35 #include <rte_memzone.h>
36 #include <rte_mempool.h>
38 #include <rte_malloc.h>
40 #include <rte_spinlock.h>
41 #include <rte_power_intrinsics.h>
45 #ifndef SO_PREFER_BUSY_POLL
46 #define SO_PREFER_BUSY_POLL 69
48 #ifndef SO_BUSY_POLL_BUDGET
49 #define SO_BUSY_POLL_BUDGET 70
65 RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE);
67 #define AF_XDP_LOG(level, fmt, args...) \
68 rte_log(RTE_LOG_ ## level, af_xdp_logtype, \
69 "%s(): " fmt, __func__, ##args)
71 #define ETH_AF_XDP_FRAME_SIZE 2048
72 #define ETH_AF_XDP_NUM_BUFFERS 4096
73 #define ETH_AF_XDP_DFLT_NUM_DESCS XSK_RING_CONS__DEFAULT_NUM_DESCS
74 #define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0
75 #define ETH_AF_XDP_DFLT_QUEUE_COUNT 1
76 #define ETH_AF_XDP_DFLT_BUSY_BUDGET 64
77 #define ETH_AF_XDP_DFLT_BUSY_TIMEOUT 20
79 #define ETH_AF_XDP_RX_BATCH_SIZE XSK_RING_CONS__DEFAULT_NUM_DESCS
80 #define ETH_AF_XDP_TX_BATCH_SIZE XSK_RING_CONS__DEFAULT_NUM_DESCS
82 #define ETH_AF_XDP_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
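/*
 * Per-UMEM bookkeeping. In zero-copy mode the UMEM overlays an rte_mempool
 * (mb_pool); in copy mode it uses a dedicated memzone (mz) plus a ring of
 * free frame addresses (buf_ring).
 */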
84 struct xsk_umem_info {
85 struct xsk_umem *umem;
86 struct rte_ring *buf_ring;
87 const struct rte_memzone *mz;
88 struct rte_mempool *mb_pool;
100 struct pkt_rx_queue {
101 struct xsk_ring_cons rx;
102 struct xsk_umem_info *umem;
103 struct xsk_socket *xsk;
104 struct rte_mempool *mb_pool;
106 struct rx_stats stats;
108 struct xsk_ring_prod fq;
109 struct xsk_ring_cons cq;
111 struct pkt_tx_queue *pair;
112 struct pollfd fds[1];
123 struct pkt_tx_queue {
124 struct xsk_ring_prod tx;
125 struct xsk_umem_info *umem;
127 struct tx_stats stats;
129 struct pkt_rx_queue *pair;
133 struct pmd_internals {
135 char if_name[IFNAMSIZ];
139 int combined_queue_cnt;
141 char prog_path[PATH_MAX];
142 bool custom_prog_configured;
145 struct rte_ether_addr eth_addr;
147 struct pkt_rx_queue *rx_queues;
148 struct pkt_tx_queue *tx_queues;
151 #define ETH_AF_XDP_IFACE_ARG "iface"
152 #define ETH_AF_XDP_START_QUEUE_ARG "start_queue"
153 #define ETH_AF_XDP_QUEUE_COUNT_ARG "queue_count"
154 #define ETH_AF_XDP_SHARED_UMEM_ARG "shared_umem"
155 #define ETH_AF_XDP_PROG_ARG "xdp_prog"
156 #define ETH_AF_XDP_BUDGET_ARG "busy_budget"
158 static const char * const valid_arguments[] = {
159 ETH_AF_XDP_IFACE_ARG,
160 ETH_AF_XDP_START_QUEUE_ARG,
161 ETH_AF_XDP_QUEUE_COUNT_ARG,
162 ETH_AF_XDP_SHARED_UMEM_ARG,
164 ETH_AF_XDP_BUDGET_ARG,
168 static const struct rte_eth_link pmd_link = {
169 .link_speed = RTE_ETH_SPEED_NUM_10G,
170 .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
171 .link_status = RTE_ETH_LINK_DOWN,
172 .link_autoneg = RTE_ETH_LINK_AUTONEG
175 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
176 struct internal_list {
177 TAILQ_ENTRY(internal_list) next;
178 struct rte_eth_dev *eth_dev;
181 TAILQ_HEAD(internal_list_head, internal_list);
182 static struct internal_list_head internal_list =
183 TAILQ_HEAD_INITIALIZER(internal_list);
185 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
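/*
 * Fill queue replenishment. The zero-copy variant posts addresses derived
 * from the caller's mbufs; the copy-mode variant dequeues free frame
 * addresses from buf_ring. Both back out cleanly if the fill ring cannot
 * reserve enough descriptors.
 */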
187 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
189 reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
190 struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
195 if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
196 for (i = 0; i < reserve_size; i++)
197 rte_pktmbuf_free(bufs[i]);
198 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
202 for (i = 0; i < reserve_size; i++) {
206 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
207 addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
208 umem->mb_pool->header_size;
212 xsk_ring_prod__submit(fq, reserve_size);
218 reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
219 struct rte_mbuf **bufs __rte_unused,
220 struct xsk_ring_prod *fq)
222 void *addrs[reserve_size];
226 if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
228 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
232 if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
233 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
234 rte_ring_enqueue_bulk(umem->buf_ring, addrs,
239 for (i = 0; i < reserve_size; i++) {
242 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
243 *fq_addr = (uint64_t)addrs[i];
246 xsk_ring_prod__submit(fq, reserve_size);
253 reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
254 struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
256 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
257 return reserve_fill_queue_zc(umem, reserve_size, bufs, fq);
259 return reserve_fill_queue_cp(umem, reserve_size, bufs, fq);
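/*
 * Zero-copy receive: each completed descriptor points into mempool-backed
 * UMEM memory, so the frame is handed to the application as its original
 * rte_mbuf and the fill queue is replenished with freshly allocated mbufs.
 */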
263 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
265 af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
267 struct pkt_rx_queue *rxq = queue;
268 struct xsk_ring_cons *rx = &rxq->rx;
269 struct xsk_ring_prod *fq = &rxq->fq;
270 struct xsk_umem_info *umem = rxq->umem;
272 unsigned long rx_bytes = 0;
274 struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
276 nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
279 /* we can assume a kernel >= 5.11 is in use if busy polling is
280 * enabled and thus we can safely use the recvfrom() syscall
281 * which is only supported for AF_XDP sockets in kernels >= 5.11.
284 if (rxq->busy_budget) {
285 (void)recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
286 MSG_DONTWAIT, NULL, NULL);
287 } else if (xsk_ring_prod__needs_wakeup(fq)) {
288 (void)poll(&rxq->fds[0], 1, 1000);
294 /* allocate bufs for fill queue replenishment after rx */
295 if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
297 "Failed to get enough buffers for fq.\n");
298 /* roll back cached_cons, which was advanced by
299 * xsk_ring_cons__peek
301 rx->cached_cons -= nb_pkts;
305 for (i = 0; i < nb_pkts; i++) {
306 const struct xdp_desc *desc;
311 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
315 offset = xsk_umem__extract_offset(addr);
316 addr = xsk_umem__extract_addr(addr);
318 bufs[i] = (struct rte_mbuf *)
319 xsk_umem__get_data(umem->buffer, addr +
320 umem->mb_pool->header_size);
321 bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
322 rte_pktmbuf_priv_size(umem->mb_pool) -
323 umem->mb_pool->header_size;
325 rte_pktmbuf_pkt_len(bufs[i]) = len;
326 rte_pktmbuf_data_len(bufs[i]) = len;
330 xsk_ring_cons__release(rx, nb_pkts);
331 (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
334 rxq->stats.rx_pkts += nb_pkts;
335 rxq->stats.rx_bytes += rx_bytes;
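/*
 * Copy-mode receive: packet data is copied from the UMEM frame into a newly
 * allocated mbuf and the frame address is returned to buf_ring.
 */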
341 af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
343 struct pkt_rx_queue *rxq = queue;
344 struct xsk_ring_cons *rx = &rxq->rx;
345 struct xsk_umem_info *umem = rxq->umem;
346 struct xsk_ring_prod *fq = &rxq->fq;
348 unsigned long rx_bytes = 0;
350 uint32_t free_thresh = fq->size >> 1;
351 struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
353 if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
354 (void)reserve_fill_queue(umem, nb_pkts, NULL, fq);
356 nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
358 #if defined(XDP_USE_NEED_WAKEUP)
359 if (xsk_ring_prod__needs_wakeup(fq))
360 (void)poll(rxq->fds, 1, 1000);
365 if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
366 /* roll back cached_cons, which was advanced by
367 * xsk_ring_cons__peek
369 rx->cached_cons -= nb_pkts;
373 for (i = 0; i < nb_pkts; i++) {
374 const struct xdp_desc *desc;
379 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
382 pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);
384 rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
385 rte_ring_enqueue(umem->buf_ring, (void *)addr);
386 rte_pktmbuf_pkt_len(mbufs[i]) = len;
387 rte_pktmbuf_data_len(mbufs[i]) = len;
392 xsk_ring_cons__release(rx, nb_pkts);
395 rxq->stats.rx_pkts += nb_pkts;
396 rxq->stats.rx_bytes += rx_bytes;
403 af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
405 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
406 return af_xdp_rx_zc(queue, bufs, nb_pkts);
408 return af_xdp_rx_cp(queue, bufs, nb_pkts);
413 eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
417 if (likely(nb_pkts <= ETH_AF_XDP_RX_BATCH_SIZE))
418 return af_xdp_rx(queue, bufs, nb_pkts);
420 /* Split larger batch into smaller batches of size
421 * ETH_AF_XDP_RX_BATCH_SIZE or less.
427 n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
428 ret = af_xdp_rx(queue, &bufs[nb_rx], n);
429 nb_rx = (uint16_t)(nb_rx + ret);
430 nb_pkts = (uint16_t)(nb_pkts - ret);
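/*
 * Drain the completion queue, returning transmitted frames to their owner:
 * the mbuf is freed in zero-copy mode, the address is re-enqueued on
 * buf_ring in copy mode.
 */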
439 pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
444 n = xsk_ring_cons__peek(cq, size, &idx_cq);
446 for (i = 0; i < n; i++) {
448 addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
449 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
450 addr = xsk_umem__extract_addr(addr);
451 rte_pktmbuf_free((struct rte_mbuf *)
452 xsk_umem__get_data(umem->buffer,
453 addr + umem->mb_pool->header_size));
455 rte_ring_enqueue(umem->buf_ring, (void *)addr);
459 xsk_ring_cons__release(cq, n);
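/*
 * Wake up the kernel Tx path with a non-blocking send() when needed,
 * draining the completion queue whenever the syscall backs off with
 * EBUSY, EAGAIN or EINTR.
 */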
463 kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
465 struct xsk_umem_info *umem = txq->umem;
467 pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
469 if (tx_syscall_needed(&txq->tx))
470 while (send(xsk_socket__fd(txq->pair->xsk), NULL,
471 0, MSG_DONTWAIT) < 0) {
472 /* something unexpected */
473 if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
476 /* pull from completion queue to leave more space */
479 XSK_RING_CONS__DEFAULT_NUM_DESCS,
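/*
 * Zero-copy transmit: mbufs that already belong to the UMEM mempool are
 * posted by address; mbufs from other pools are first copied into a freshly
 * allocated UMEM mbuf and then freed.
 */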
484 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
486 af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
488 struct pkt_tx_queue *txq = queue;
489 struct xsk_umem_info *umem = txq->umem;
490 struct rte_mbuf *mbuf;
491 unsigned long tx_bytes = 0;
495 struct xdp_desc *desc;
496 uint64_t addr, offset;
497 struct xsk_ring_cons *cq = &txq->pair->cq;
498 uint32_t free_thresh = cq->size >> 1;
500 if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
501 pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
503 for (i = 0; i < nb_pkts; i++) {
506 if (mbuf->pool == umem->mb_pool) {
507 if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
509 if (!xsk_ring_prod__reserve(&txq->tx, 1,
513 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
514 desc->len = mbuf->pkt_len;
515 addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
516 umem->mb_pool->header_size;
517 offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
519 umem->mb_pool->header_size;
520 offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
521 desc->addr = addr | offset;
524 struct rte_mbuf *local_mbuf =
525 rte_pktmbuf_alloc(umem->mb_pool);
528 if (local_mbuf == NULL)
531 if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
532 rte_pktmbuf_free(local_mbuf);
536 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
537 desc->len = mbuf->pkt_len;
539 addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
540 umem->mb_pool->header_size;
541 offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
542 (uint64_t)local_mbuf +
543 umem->mb_pool->header_size;
544 pkt = xsk_umem__get_data(umem->buffer, addr + offset);
545 offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
546 desc->addr = addr | offset;
547 rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
549 rte_pktmbuf_free(mbuf);
553 tx_bytes += mbuf->pkt_len;
557 xsk_ring_prod__submit(&txq->tx, count);
560 txq->stats.tx_pkts += count;
561 txq->stats.tx_bytes += tx_bytes;
562 txq->stats.tx_dropped += nb_pkts - count;
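/*
 * Copy-mode transmit: dequeue free frame addresses from buf_ring, copy the
 * packet data into the UMEM memzone and submit the descriptors.
 */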
568 af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
570 struct pkt_tx_queue *txq = queue;
571 struct xsk_umem_info *umem = txq->umem;
572 struct rte_mbuf *mbuf;
573 void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
574 unsigned long tx_bytes = 0;
577 struct xsk_ring_cons *cq = &txq->pair->cq;
579 pull_umem_cq(umem, nb_pkts, cq);
581 nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
586 if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
588 rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
592 for (i = 0; i < nb_pkts; i++) {
593 struct xdp_desc *desc;
596 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
598 desc->len = mbuf->pkt_len;
600 desc->addr = (uint64_t)addrs[i];
601 pkt = xsk_umem__get_data(umem->mz->addr,
603 rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
604 tx_bytes += mbuf->pkt_len;
605 rte_pktmbuf_free(mbuf);
608 xsk_ring_prod__submit(&txq->tx, nb_pkts);
612 txq->stats.tx_pkts += nb_pkts;
613 txq->stats.tx_bytes += tx_bytes;
619 af_xdp_tx_cp_batch(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
623 if (likely(nb_pkts <= ETH_AF_XDP_TX_BATCH_SIZE))
624 return af_xdp_tx_cp(queue, bufs, nb_pkts);
630 /* Split larger batch into smaller batches of size
631 * ETH_AF_XDP_TX_BATCH_SIZE or less.
633 n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
634 ret = af_xdp_tx_cp(queue, &bufs[nb_tx], n);
635 nb_tx = (uint16_t)(nb_tx + ret);
636 nb_pkts = (uint16_t)(nb_pkts - ret);
646 eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
648 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
649 return af_xdp_tx_zc(queue, bufs, nb_pkts);
651 return af_xdp_tx_cp_batch(queue, bufs, nb_pkts);
656 eth_dev_start(struct rte_eth_dev *dev)
658 dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
663 /* This function gets called when the current port gets stopped. */
665 eth_dev_stop(struct rte_eth_dev *dev)
667 dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
671 /* Find ethdev in list */
672 static inline struct internal_list *
673 find_internal_resource(struct pmd_internals *port_int)
676 struct internal_list *list = NULL;
678 if (port_int == NULL)
681 pthread_mutex_lock(&internal_list_lock);
683 TAILQ_FOREACH(list, &internal_list, next) {
684 struct pmd_internals *list_int =
685 list->eth_dev->data->dev_private;
686 if (list_int == port_int) {
692 pthread_mutex_unlock(&internal_list_lock);
701 eth_dev_configure(struct rte_eth_dev *dev)
703 struct pmd_internals *internal = dev->data->dev_private;
705 /* rx/tx must be paired */
706 if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
709 if (internal->shared_umem) {
710 struct internal_list *list = NULL;
711 const char *name = dev->device->name;
713 /* Ensure PMD is not already inserted into the list */
714 list = find_internal_resource(internal);
718 list = rte_zmalloc_socket(name, sizeof(*list), 0,
719 dev->device->numa_node);
724 pthread_mutex_lock(&internal_list_lock);
725 TAILQ_INSERT_TAIL(&internal_list, list, next);
726 pthread_mutex_unlock(&internal_list_lock);
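/*
 * rte_power_monitor support: eth_get_monitor_addr() watches the Rx producer
 * index and the callback below aborts the power-optimized state as soon as
 * its lower 32 bits differ from the cached value.
 */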
732 #define CLB_VAL_IDX 0
734 eth_monitor_callback(const uint64_t value,
735 const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
737 const uint64_t v = opaque[CLB_VAL_IDX];
738 const uint64_t m = (uint32_t)~0;
740 /* if the value has changed, abort entering power optimized state */
741 return (value & m) == v ? 0 : -1;
745 eth_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
747 struct pkt_rx_queue *rxq = rx_queue;
748 unsigned int *prod = rxq->rx.producer;
749 const uint32_t cur_val = rxq->rx.cached_prod; /* use cached value */
751 /* watch for changes in producer ring */
752 pmc->addr = (void *)prod;
754 /* store current value */
755 pmc->opaque[CLB_VAL_IDX] = cur_val;
756 pmc->fn = eth_monitor_callback;
758 /* AF_XDP producer ring index is 32-bit */
759 pmc->size = sizeof(uint32_t);
765 eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
767 struct pmd_internals *internals = dev->data->dev_private;
769 dev_info->if_index = internals->if_index;
770 dev_info->max_mac_addrs = 1;
771 dev_info->max_rx_queues = internals->queue_cnt;
772 dev_info->max_tx_queues = internals->queue_cnt;
774 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
775 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
776 dev_info->max_rx_pktlen = getpagesize() -
777 sizeof(struct rte_mempool_objhdr) -
778 sizeof(struct rte_mbuf) -
779 RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
781 dev_info->max_rx_pktlen = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
783 dev_info->max_mtu = dev_info->max_rx_pktlen - ETH_AF_XDP_ETH_OVERHEAD;
785 dev_info->default_rxportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
786 dev_info->default_txportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
787 dev_info->default_rxportconf.nb_queues = 1;
788 dev_info->default_txportconf.nb_queues = 1;
789 dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
790 dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
796 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
798 struct pmd_internals *internals = dev->data->dev_private;
799 struct xdp_statistics xdp_stats;
800 struct pkt_rx_queue *rxq;
801 struct pkt_tx_queue *txq;
805 for (i = 0; i < dev->data->nb_rx_queues; i++) {
806 optlen = sizeof(struct xdp_statistics);
807 rxq = &internals->rx_queues[i];
809 stats->q_ipackets[i] = rxq->stats.rx_pkts;
810 stats->q_ibytes[i] = rxq->stats.rx_bytes;
812 stats->q_opackets[i] = txq->stats.tx_pkts;
813 stats->q_obytes[i] = txq->stats.tx_bytes;
815 stats->ipackets += stats->q_ipackets[i];
816 stats->ibytes += stats->q_ibytes[i];
817 stats->imissed += rxq->stats.rx_dropped;
818 stats->oerrors += txq->stats.tx_dropped;
819 ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
820 XDP_STATISTICS, &xdp_stats, &optlen);
822 AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
825 stats->imissed += xdp_stats.rx_dropped;
827 stats->opackets += stats->q_opackets[i];
828 stats->obytes += stats->q_obytes[i];
835 eth_stats_reset(struct rte_eth_dev *dev)
837 struct pmd_internals *internals = dev->data->dev_private;
840 for (i = 0; i < internals->queue_cnt; i++) {
841 memset(&internals->rx_queues[i].stats, 0,
842 sizeof(struct rx_stats));
843 memset(&internals->tx_queues[i].stats, 0,
844 sizeof(struct tx_stats));
851 remove_xdp_program(struct pmd_internals *internals)
853 uint32_t curr_prog_id = 0;
855 if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
856 XDP_FLAGS_UPDATE_IF_NOEXIST)) {
857 AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
860 bpf_set_link_xdp_fd(internals->if_index, -1,
861 XDP_FLAGS_UPDATE_IF_NOEXIST);
865 xdp_umem_destroy(struct xsk_umem_info *umem)
867 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
868 umem->mb_pool = NULL;
870 rte_memzone_free(umem->mz);
873 rte_ring_free(umem->buf_ring);
874 umem->buf_ring = NULL;
881 eth_dev_close(struct rte_eth_dev *dev)
883 struct pmd_internals *internals = dev->data->dev_private;
884 struct pkt_rx_queue *rxq;
887 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
890 AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
893 for (i = 0; i < internals->queue_cnt; i++) {
894 rxq = &internals->rx_queues[i];
895 if (rxq->umem == NULL)
897 xsk_socket__delete(rxq->xsk);
899 if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE)
901 (void)xsk_umem__delete(rxq->umem->umem);
902 xdp_umem_destroy(rxq->umem);
905 /* free pkt_tx_queue */
911 * MAC is not allocated dynamically; setting it to NULL prevents
912 * rte_eth_dev_release_port() from trying to release it.
914 dev->data->mac_addrs = NULL;
916 remove_xdp_program(internals);
918 if (internals->shared_umem) {
919 struct internal_list *list;
921 /* Remove ethdev from list used to track and share UMEMs */
922 list = find_internal_resource(internals);
924 pthread_mutex_lock(&internal_list_lock);
925 TAILQ_REMOVE(&internal_list, list, next);
926 pthread_mutex_unlock(&internal_list_lock);
935 eth_link_update(struct rte_eth_dev *dev __rte_unused,
936 int wait_to_complete __rte_unused)
941 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
942 static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
944 struct rte_mempool_memhdr *memhdr;
945 uintptr_t memhdr_addr, aligned_addr;
947 memhdr = STAILQ_FIRST(&mp->mem_list);
948 memhdr_addr = (uintptr_t)memhdr->addr;
949 aligned_addr = memhdr_addr & ~(getpagesize() - 1);
950 *align = memhdr_addr - aligned_addr;
955 /* Check if the netdev,qid context already exists */
957 ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
958 struct pkt_rx_queue *list_rxq, const char *list_ifname)
962 if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
963 !strncmp(ifname, list_ifname, IFNAMSIZ)) {
964 AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
965 ifname, rxq->xsk_queue_idx);
972 /* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
974 get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
975 struct xsk_umem_info **umem)
977 struct internal_list *list;
978 struct pmd_internals *internals;
980 struct rte_mempool *mb_pool = rxq->mb_pool;
985 pthread_mutex_lock(&internal_list_lock);
987 TAILQ_FOREACH(list, &internal_list, next) {
988 internals = list->eth_dev->data->dev_private;
989 for (i = 0; i < internals->queue_cnt; i++) {
990 struct pkt_rx_queue *list_rxq =
991 &internals->rx_queues[i];
994 if (mb_pool == internals->rx_queues[i].mb_pool) {
995 if (ctx_exists(rxq, ifname, list_rxq,
996 internals->if_name)) {
1000 if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
1001 __ATOMIC_ACQUIRE)) {
1002 *umem = internals->rx_queues[i].umem;
1010 pthread_mutex_unlock(&internal_list_lock);
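/*
 * Zero-copy UMEM setup: register the mempool memory itself as the UMEM,
 * deriving the frame size from the mempool object size, and reuse (and
 * reference count) an existing UMEM when shared_umem is enabled.
 */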
1016 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
1017 struct pkt_rx_queue *rxq)
1019 struct xsk_umem_info *umem = NULL;
1021 struct xsk_umem_config usr_config = {
1022 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS * 2,
1023 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1024 .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
1025 void *base_addr = NULL;
1026 struct rte_mempool *mb_pool = rxq->mb_pool;
1027 uint64_t umem_size, align = 0;
1029 if (internals->shared_umem) {
1030 if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
1034 __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
1036 AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
1037 internals->if_name, rxq->xsk_queue_idx);
1038 __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
1043 usr_config.frame_size =
1044 rte_mempool_calc_obj_size(mb_pool->elt_size,
1045 mb_pool->flags, NULL);
1046 usr_config.frame_headroom = mb_pool->header_size +
1047 sizeof(struct rte_mbuf) +
1048 rte_pktmbuf_priv_size(mb_pool) +
1049 RTE_PKTMBUF_HEADROOM;
1051 umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
1054 AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
1058 umem->mb_pool = mb_pool;
1059 base_addr = (void *)get_base_addr(mb_pool, &align);
1060 umem_size = (uint64_t)mb_pool->populated_size *
1061 (uint64_t)usr_config.frame_size +
1064 ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
1065 &rxq->fq, &rxq->cq, &usr_config);
1067 AF_XDP_LOG(ERR, "Failed to create umem\n");
1070 umem->buffer = base_addr;
1072 if (internals->shared_umem) {
1073 umem->max_xsks = mb_pool->populated_size /
1074 ETH_AF_XDP_NUM_BUFFERS;
1075 AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n",
1076 mb_pool->name, umem->max_xsks);
1079 __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
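/*
 * Copy-mode UMEM setup: reserve an IOVA-contiguous memzone holding
 * ETH_AF_XDP_NUM_BUFFERS fixed-size frames and seed buf_ring with every
 * frame address.
 */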
1084 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
1085 struct pkt_rx_queue *rxq)
1087 struct xsk_umem_info *umem;
1088 const struct rte_memzone *mz;
1089 struct xsk_umem_config usr_config = {
1090 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1091 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1092 .frame_size = ETH_AF_XDP_FRAME_SIZE,
1093 .frame_headroom = 0 };
1094 char ring_name[RTE_RING_NAMESIZE];
1095 char mz_name[RTE_MEMZONE_NAMESIZE];
1099 umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
1101 AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
1105 snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
1106 internals->if_name, rxq->xsk_queue_idx);
1107 umem->buf_ring = rte_ring_create(ring_name,
1108 ETH_AF_XDP_NUM_BUFFERS,
1111 if (umem->buf_ring == NULL) {
1112 AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
1116 for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
1117 rte_ring_enqueue(umem->buf_ring,
1118 (void *)(i * ETH_AF_XDP_FRAME_SIZE));
1120 snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
1121 internals->if_name, rxq->xsk_queue_idx);
1122 mz = rte_memzone_reserve_aligned(mz_name,
1123 ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1124 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
1127 AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
1131 ret = xsk_umem__create(&umem->umem, mz->addr,
1132 ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1137 AF_XDP_LOG(ERR, "Failed to create umem\n");
1146 xdp_umem_destroy(umem);
1151 load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map)
1153 int ret, prog_fd = -1;
1154 struct bpf_object *obj;
1156 ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
1158 AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
1163 * The loaded program must provision for a map of xsks, such that some
1164 * traffic can be redirected to userspace.
1166 *map = bpf_object__find_map_by_name(obj, "xsks_map");
1168 AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
1172 /* Link the program with the given network device */
1173 ret = bpf_set_link_xdp_fd(if_index, prog_fd,
1174 XDP_FLAGS_UPDATE_IF_NOEXIST);
1176 AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
1181 AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
1182 prog_path, prog_fd);
1187 /* Detect support for busy polling through setsockopt(). */
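/*
 * SO_PREFER_BUSY_POLL, SO_BUSY_POLL and SO_BUSY_POLL_BUDGET all need kernel
 * support; on any setsockopt() failure the options already applied are
 * unwound and the queue falls back to the non-busy-poll path.
 */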
1189 configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
1192 int fd = xsk_socket__fd(rxq->xsk);
1195 ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
1196 (void *)&sock_opt, sizeof(sock_opt));
1198 AF_XDP_LOG(DEBUG, "Failed to set SO_PREFER_BUSY_POLL\n");
1202 sock_opt = ETH_AF_XDP_DFLT_BUSY_TIMEOUT;
1203 ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
1206 AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL\n");
1210 sock_opt = rxq->busy_budget;
1211 ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
1212 (void *)&sock_opt, sizeof(sock_opt));
1214 AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL_BUDGET\n");
1216 AF_XDP_LOG(INFO, "Busy polling budget set to: %u\n",
1221 /* setsockopt failure - attempt to restore xsk to default state and
1222 * proceed without busy polling support.
1225 ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
1228 AF_XDP_LOG(ERR, "Failed to unset SO_BUSY_POLL\n");
1234 ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
1235 (void *)&sock_opt, sizeof(sock_opt));
1237 AF_XDP_LOG(ERR, "Failed to unset SO_PREFER_BUSY_POLL\n");
1242 rxq->busy_budget = 0;
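/*
 * Bring up one AF_XDP socket: configure (or share) the UMEM, create the
 * socket, optionally load a custom XDP program and register the socket fd
 * in its xsks_map, apply busy polling and pre-fill the fill queue.
 */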
1247 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
1250 struct xsk_socket_config cfg;
1251 struct pkt_tx_queue *txq = rxq->pair;
1253 int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS;
1254 struct rte_mbuf *fq_bufs[reserve_size];
1256 rxq->umem = xdp_umem_configure(internals, rxq);
1257 if (rxq->umem == NULL)
1259 txq->umem = rxq->umem;
1261 cfg.rx_size = ring_size;
1262 cfg.tx_size = ring_size;
1263 cfg.libbpf_flags = 0;
1264 cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
1267 #if defined(XDP_USE_NEED_WAKEUP)
1268 cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
1271 if (strnlen(internals->prog_path, PATH_MAX) &&
1272 !internals->custom_prog_configured) {
1273 ret = load_custom_xdp_prog(internals->prog_path,
1274 internals->if_index,
1277 AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
1278 internals->prog_path);
1281 internals->custom_prog_configured = 1;
1282 cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
1285 if (internals->shared_umem)
1286 ret = create_shared_socket(&rxq->xsk, internals->if_name,
1287 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1288 &txq->tx, &rxq->fq, &rxq->cq, &cfg);
1290 ret = xsk_socket__create(&rxq->xsk, internals->if_name,
1291 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1295 AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
1299 /* insert the xsk into the xsks_map */
1300 if (internals->custom_prog_configured) {
1303 fd = xsk_socket__fd(rxq->xsk);
1304 err = bpf_map_update_elem(bpf_map__fd(internals->map),
1305 &rxq->xsk_queue_idx, &fd, 0);
1307 AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n");
1312 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1313 ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
1315 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
1320 if (rxq->busy_budget) {
1321 ret = configure_preferred_busy_poll(rxq);
1323 AF_XDP_LOG(ERR, "Failed to configure busy polling.\n");
1328 ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
1330 xsk_socket__delete(rxq->xsk);
1331 AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
1338 if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0)
1339 xdp_umem_destroy(rxq->umem);
1345 eth_rx_queue_setup(struct rte_eth_dev *dev,
1346 uint16_t rx_queue_id,
1347 uint16_t nb_rx_desc,
1348 unsigned int socket_id __rte_unused,
1349 const struct rte_eth_rxconf *rx_conf __rte_unused,
1350 struct rte_mempool *mb_pool)
1352 struct pmd_internals *internals = dev->data->dev_private;
1353 struct pkt_rx_queue *rxq;
1356 rxq = &internals->rx_queues[rx_queue_id];
1358 AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
1359 rx_queue_id, rxq->xsk_queue_idx);
1361 #ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
1362 uint32_t buf_size, data_size;
1364 /* Now get the space available for data in the mbuf */
1365 buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1366 RTE_PKTMBUF_HEADROOM;
1367 data_size = ETH_AF_XDP_FRAME_SIZE;
1369 if (data_size > buf_size) {
1370 AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
1371 dev->device->name, data_size, buf_size);
1377 rxq->mb_pool = mb_pool;
1379 if (xsk_configure(internals, rxq, nb_rx_desc)) {
1380 AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
1385 if (!rxq->busy_budget)
1386 AF_XDP_LOG(DEBUG, "Preferred busy polling not enabled\n");
1388 rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
1389 rxq->fds[0].events = POLLIN;
1391 dev->data->rx_queues[rx_queue_id] = rxq;
1399 eth_tx_queue_setup(struct rte_eth_dev *dev,
1400 uint16_t tx_queue_id,
1401 uint16_t nb_tx_desc __rte_unused,
1402 unsigned int socket_id __rte_unused,
1403 const struct rte_eth_txconf *tx_conf __rte_unused)
1405 struct pmd_internals *internals = dev->data->dev_private;
1406 struct pkt_tx_queue *txq;
1408 txq = &internals->tx_queues[tx_queue_id];
1410 dev->data->tx_queues[tx_queue_id] = txq;
1415 eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1417 struct pmd_internals *internals = dev->data->dev_private;
1418 struct ifreq ifr = { .ifr_mtu = mtu };
1422 s = socket(PF_INET, SOCK_DGRAM, 0);
1426 strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
1427 ret = ioctl(s, SIOCSIFMTU, &ifr);
1430 return (ret < 0) ? -errno : 0;
1434 eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
1440 s = socket(PF_INET, SOCK_DGRAM, 0);
1444 strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1445 if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
1449 ifr.ifr_flags &= mask;
1450 ifr.ifr_flags |= flags;
1451 if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
1461 eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
1463 struct pmd_internals *internals = dev->data->dev_private;
1465 return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
1469 eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
1471 struct pmd_internals *internals = dev->data->dev_private;
1473 return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
1476 static const struct eth_dev_ops ops = {
1477 .dev_start = eth_dev_start,
1478 .dev_stop = eth_dev_stop,
1479 .dev_close = eth_dev_close,
1480 .dev_configure = eth_dev_configure,
1481 .dev_infos_get = eth_dev_info,
1482 .mtu_set = eth_dev_mtu_set,
1483 .promiscuous_enable = eth_dev_promiscuous_enable,
1484 .promiscuous_disable = eth_dev_promiscuous_disable,
1485 .rx_queue_setup = eth_rx_queue_setup,
1486 .tx_queue_setup = eth_tx_queue_setup,
1487 .link_update = eth_link_update,
1488 .stats_get = eth_stats_get,
1489 .stats_reset = eth_stats_reset,
1490 .get_monitor_addr = eth_get_monitor_addr,
1493 /** parse busy_budget argument */
1495 parse_budget_arg(const char *key __rte_unused,
1496 const char *value, void *extra_args)
1498 int *i = (int *)extra_args;
1501 *i = strtol(value, &end, 10);
1502 if (*i < 0 || *i > UINT16_MAX) {
1503 AF_XDP_LOG(ERR, "Invalid busy_budget %i, must be >= 0 and <= %u\n",
1511 /** parse an integer argument */
1513 parse_integer_arg(const char *key __rte_unused,
1514 const char *value, void *extra_args)
1516 int *i = (int *)extra_args;
1519 *i = strtol(value, &end, 10);
1521 AF_XDP_LOG(ERR, "Argument has to be positive.\n");
1528 /** parse name argument */
1530 parse_name_arg(const char *key __rte_unused,
1531 const char *value, void *extra_args)
1533 char *name = extra_args;
1535 if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
1536 AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
1541 strlcpy(name, value, IFNAMSIZ);
1546 /** parse xdp prog argument */
1548 parse_prog_arg(const char *key __rte_unused,
1549 const char *value, void *extra_args)
1551 char *path = extra_args;
1553 if (strnlen(value, PATH_MAX) == PATH_MAX) {
1554 AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
1559 if (access(value, F_OK) != 0) {
1560 AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
1561 value, strerror(errno));
1565 strlcpy(path, value, PATH_MAX);
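/*
 * Query the interface's channel configuration via the ETHTOOL_GCHANNELS
 * ioctl; devices that do not support it are treated as single-queue.
 */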
1571 xdp_get_channels_info(const char *if_name, int *max_queues,
1572 int *combined_queues)
1574 struct ethtool_channels channels;
1578 fd = socket(AF_INET, SOCK_DGRAM, 0);
1582 channels.cmd = ETHTOOL_GCHANNELS;
1583 ifr.ifr_data = (void *)&channels;
1584 strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1585 ret = ioctl(fd, SIOCETHTOOL, &ifr);
1587 if (errno == EOPNOTSUPP) {
1595 if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
1596 /* If the device says it has no channels, then all traffic
1597 * is sent to a single stream, so max queues = 1.
1600 *combined_queues = 1;
1602 *max_queues = channels.max_combined;
1603 *combined_queues = channels.combined_count;
1612 parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
1613 int *queue_cnt, int *shared_umem, char *prog_path,
1618 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
1619 &parse_name_arg, if_name);
1623 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
1624 &parse_integer_arg, start_queue);
1628 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
1629 &parse_integer_arg, queue_cnt);
1630 if (ret < 0 || *queue_cnt <= 0) {
1635 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_SHARED_UMEM_ARG,
1636 &parse_integer_arg, shared_umem);
1640 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG,
1641 &parse_prog_arg, prog_path);
1645 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_BUDGET_ARG,
1646 &parse_budget_arg, busy_budget);
1651 rte_kvargs_free(kvlist);
1656 get_iface_info(const char *if_name,
1657 struct rte_ether_addr *eth_addr,
1661 int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
1666 strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1667 if (ioctl(sock, SIOCGIFINDEX, &ifr))
1670 *if_index = ifr.ifr_ifindex;
1672 if (ioctl(sock, SIOCGIFHWADDR, &ifr))
1675 rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
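/*
 * Allocate and populate the per-port private data: validate the requested
 * queue count against the interface's combined channels, pair up the Rx and
 * Tx queue structures and fetch the interface index and MAC address.
 */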
1685 static struct rte_eth_dev *
1686 init_internals(struct rte_vdev_device *dev, const char *if_name,
1687 int start_queue_idx, int queue_cnt, int shared_umem,
1688 const char *prog_path, int busy_budget)
1690 const char *name = rte_vdev_device_name(dev);
1691 const unsigned int numa_node = dev->device.numa_node;
1692 struct pmd_internals *internals;
1693 struct rte_eth_dev *eth_dev;
1697 internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
1698 if (internals == NULL)
1701 internals->start_queue_idx = start_queue_idx;
1702 internals->queue_cnt = queue_cnt;
1703 strlcpy(internals->if_name, if_name, IFNAMSIZ);
1704 strlcpy(internals->prog_path, prog_path, PATH_MAX);
1705 internals->custom_prog_configured = 0;
1707 #ifndef ETH_AF_XDP_SHARED_UMEM
1709 AF_XDP_LOG(ERR, "Shared UMEM feature not available. "
1710 "Check kernel and libbpf version\n");
1711 goto err_free_internals;
1714 internals->shared_umem = shared_umem;
1716 if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
1717 &internals->combined_queue_cnt)) {
1718 AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
1720 goto err_free_internals;
1723 if (queue_cnt > internals->combined_queue_cnt) {
1724 AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
1725 queue_cnt, internals->combined_queue_cnt);
1726 goto err_free_internals;
1729 internals->rx_queues = rte_zmalloc_socket(NULL,
1730 sizeof(struct pkt_rx_queue) * queue_cnt,
1732 if (internals->rx_queues == NULL) {
1733 AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
1734 goto err_free_internals;
1737 internals->tx_queues = rte_zmalloc_socket(NULL,
1738 sizeof(struct pkt_tx_queue) * queue_cnt,
1740 if (internals->tx_queues == NULL) {
1741 AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
1744 for (i = 0; i < queue_cnt; i++) {
1745 internals->tx_queues[i].pair = &internals->rx_queues[i];
1746 internals->rx_queues[i].pair = &internals->tx_queues[i];
1747 internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
1748 internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
1749 internals->rx_queues[i].busy_budget = busy_budget;
1752 ret = get_iface_info(if_name, &internals->eth_addr,
1753 &internals->if_index);
1757 eth_dev = rte_eth_vdev_allocate(dev, 0);
1758 if (eth_dev == NULL)
1761 eth_dev->data->dev_private = internals;
1762 eth_dev->data->dev_link = pmd_link;
1763 eth_dev->data->mac_addrs = &internals->eth_addr;
1764 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1765 eth_dev->dev_ops = &ops;
1766 eth_dev->rx_pkt_burst = eth_af_xdp_rx;
1767 eth_dev->tx_pkt_burst = eth_af_xdp_tx;
1769 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1770 AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
1776 rte_free(internals->tx_queues);
1778 rte_free(internals->rx_queues);
1780 rte_free(internals);
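/*
 * vdev probe entry point: parse the devargs (iface, start_queue,
 * queue_count, shared_umem, xdp_prog, busy_budget), then create and
 * register the ethdev. Secondary processes are rejected.
 */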
1785 rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
1787 struct rte_kvargs *kvlist;
1788 char if_name[IFNAMSIZ] = {'\0'};
1789 int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
1790 int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
1791 int shared_umem = 0;
1792 char prog_path[PATH_MAX] = {'\0'};
1793 int busy_budget = -1;
1794 struct rte_eth_dev *eth_dev = NULL;
1797 AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
1798 rte_vdev_device_name(dev));
1800 name = rte_vdev_device_name(dev);
1801 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1802 AF_XDP_LOG(ERR, "Failed to probe %s. "
1803 "AF_XDP PMD does not support secondary processes.\n",
1808 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1809 if (kvlist == NULL) {
1810 AF_XDP_LOG(ERR, "Invalid kvargs key\n");
1814 if (dev->device.numa_node == SOCKET_ID_ANY)
1815 dev->device.numa_node = rte_socket_id();
1817 if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
1818 &xsk_queue_cnt, &shared_umem, prog_path,
1819 &busy_budget) < 0) {
1820 AF_XDP_LOG(ERR, "Invalid kvargs value\n");
1824 if (strlen(if_name) == 0) {
1825 AF_XDP_LOG(ERR, "Network interface must be specified\n");
1829 busy_budget = busy_budget == -1 ? ETH_AF_XDP_DFLT_BUSY_BUDGET :
1832 eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
1833 xsk_queue_cnt, shared_umem, prog_path,
1835 if (eth_dev == NULL) {
1836 AF_XDP_LOG(ERR, "Failed to init internals\n");
1840 rte_eth_dev_probing_finish(eth_dev);
1846 rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
1848 struct rte_eth_dev *eth_dev = NULL;
1850 AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
1856 /* find the ethdev entry */
1857 eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1858 if (eth_dev == NULL)
1861 eth_dev_close(eth_dev);
1862 rte_eth_dev_release_port(eth_dev);
1868 static struct rte_vdev_driver pmd_af_xdp_drv = {
1869 .probe = rte_pmd_af_xdp_probe,
1870 .remove = rte_pmd_af_xdp_remove,
1873 RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
1874 RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
1876 "start_queue=<int> "
1877 "queue_count=<int> "
1878 "shared_umem=<int> "
1879 "xdp_prog=<string> "
1880 "busy_budget=<int>");