1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Intel Corporation.
8 #include <netinet/in.h>
10 #include <sys/socket.h>
11 #include <sys/ioctl.h>
12 #include <linux/if_ether.h>
13 #include <linux/if_xdp.h>
14 #include <linux/if_link.h>
15 #include <linux/ethtool.h>
16 #include <linux/sockios.h>
17 #include "af_xdp_deps.h"
19 #include <rte_ethdev.h>
20 #include <ethdev_driver.h>
21 #include <ethdev_vdev.h>
22 #include <rte_kvargs.h>
23 #include <rte_bus_vdev.h>
24 #include <rte_string_fns.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_common.h>
29 #include <rte_ether.h>
30 #include <rte_lcore.h>
32 #include <rte_memory.h>
33 #include <rte_memzone.h>
34 #include <rte_mempool.h>
36 #include <rte_malloc.h>
38 #include <rte_spinlock.h>
39 #include <rte_power_intrinsics.h>
43 #ifndef SO_PREFER_BUSY_POLL
44 #define SO_PREFER_BUSY_POLL 69
46 #ifndef SO_BUSY_POLL_BUDGET
47 #define SO_BUSY_POLL_BUDGET 70
63 RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE);
65 #define AF_XDP_LOG(level, fmt, args...) \
66 rte_log(RTE_LOG_ ## level, af_xdp_logtype, \
67 "%s(): " fmt, __func__, ##args)
69 #define ETH_AF_XDP_FRAME_SIZE 2048
70 #define ETH_AF_XDP_NUM_BUFFERS 4096
71 #define ETH_AF_XDP_DFLT_NUM_DESCS XSK_RING_CONS__DEFAULT_NUM_DESCS
72 #define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0
73 #define ETH_AF_XDP_DFLT_QUEUE_COUNT 1
74 #define ETH_AF_XDP_DFLT_BUSY_BUDGET 64
75 #define ETH_AF_XDP_DFLT_BUSY_TIMEOUT 20
77 #define ETH_AF_XDP_RX_BATCH_SIZE XSK_RING_CONS__DEFAULT_NUM_DESCS
78 #define ETH_AF_XDP_TX_BATCH_SIZE XSK_RING_CONS__DEFAULT_NUM_DESCS
80 #define ETH_AF_XDP_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
82 #define ETH_AF_XDP_MP_KEY "afxdp_mp_send_fds"
84 static int afxdp_dev_count;
86 /* Message header to synchronize fds via IPC */
88 char port_name[RTE_DEV_NAME_MAX_LEN];
89 /* The file descriptors are passed in the dedicated (ancillary) part
90 * of the Unix-domain message so that the kernel can translate them.
94 struct xsk_umem_info {
95 struct xsk_umem *umem;
96 struct rte_ring *buf_ring;
97 const struct rte_memzone *mz;
98 struct rte_mempool *mb_pool;
110 struct pkt_rx_queue {
111 struct xsk_ring_cons rx;
112 struct xsk_umem_info *umem;
113 struct xsk_socket *xsk;
114 struct rte_mempool *mb_pool;
116 struct rx_stats stats;
118 struct xsk_ring_prod fq;
119 struct xsk_ring_cons cq;
121 struct pkt_tx_queue *pair;
122 struct pollfd fds[1];
133 struct pkt_tx_queue {
134 struct xsk_ring_prod tx;
135 struct xsk_umem_info *umem;
137 struct tx_stats stats;
139 struct pkt_rx_queue *pair;
143 struct pmd_internals {
145 char if_name[IFNAMSIZ];
149 int combined_queue_cnt;
151 char prog_path[PATH_MAX];
152 bool custom_prog_configured;
155 struct rte_ether_addr eth_addr;
157 struct pkt_rx_queue *rx_queues;
158 struct pkt_tx_queue *tx_queues;
161 struct pmd_process_private {
162 int rxq_xsk_fds[RTE_MAX_QUEUES_PER_PORT];
165 #define ETH_AF_XDP_IFACE_ARG "iface"
166 #define ETH_AF_XDP_START_QUEUE_ARG "start_queue"
167 #define ETH_AF_XDP_QUEUE_COUNT_ARG "queue_count"
168 #define ETH_AF_XDP_SHARED_UMEM_ARG "shared_umem"
169 #define ETH_AF_XDP_PROG_ARG "xdp_prog"
170 #define ETH_AF_XDP_BUDGET_ARG "busy_budget"
172 static const char * const valid_arguments[] = {
173 ETH_AF_XDP_IFACE_ARG,
174 ETH_AF_XDP_START_QUEUE_ARG,
175 ETH_AF_XDP_QUEUE_COUNT_ARG,
176 ETH_AF_XDP_SHARED_UMEM_ARG,
178 ETH_AF_XDP_BUDGET_ARG,
182 static const struct rte_eth_link pmd_link = {
183 .link_speed = RTE_ETH_SPEED_NUM_10G,
184 .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
185 .link_status = RTE_ETH_LINK_DOWN,
186 .link_autoneg = RTE_ETH_LINK_AUTONEG
189 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
190 struct internal_list {
191 TAILQ_ENTRY(internal_list) next;
192 struct rte_eth_dev *eth_dev;
195 TAILQ_HEAD(internal_list_head, internal_list);
196 static struct internal_list_head internal_list =
197 TAILQ_HEAD_INITIALIZER(internal_list);
199 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
201 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
203 reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
204 struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
209 if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
210 for (i = 0; i < reserve_size; i++)
211 rte_pktmbuf_free(bufs[i]);
212 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
216 for (i = 0; i < reserve_size; i++) {
220 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
221 addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
222 umem->mb_pool->header_size;
226 xsk_ring_prod__submit(fq, reserve_size);
232 reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
233 struct rte_mbuf **bufs __rte_unused,
234 struct xsk_ring_prod *fq)
236 void *addrs[reserve_size];
240 if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
242 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
246 if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
247 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
248 rte_ring_enqueue_bulk(umem->buf_ring, addrs,
253 for (i = 0; i < reserve_size; i++) {
256 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
257 *fq_addr = (uint64_t)addrs[i];
260 xsk_ring_prod__submit(fq, reserve_size);
267 reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
268 struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
270 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
271 return reserve_fill_queue_zc(umem, reserve_size, bufs, fq);
273 return reserve_fill_queue_cp(umem, reserve_size, bufs, fq);
277 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
279 af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
281 struct pkt_rx_queue *rxq = queue;
282 struct xsk_ring_cons *rx = &rxq->rx;
283 struct xsk_ring_prod *fq = &rxq->fq;
284 struct xsk_umem_info *umem = rxq->umem;
286 unsigned long rx_bytes = 0;
288 struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
290 nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
293 /* we can assume a kernel >= 5.11 is in use if busy polling is
294 * enabled and thus we can safely use the recvfrom() syscall
295 * which is only supported for AF_XDP sockets in kernels >=
298 if (rxq->busy_budget) {
299 (void)recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
300 MSG_DONTWAIT, NULL, NULL);
301 } else if (xsk_ring_prod__needs_wakeup(fq)) {
302 (void)poll(&rxq->fds[0], 1, 1000);
308 /* allocate bufs for fill queue replenishment after rx */
309 if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
311 "Failed to get enough buffers for fq.\n");
312 /* roll back cached_cons, which was advanced by
313 * xsk_ring_cons__peek()
315 rx->cached_cons -= nb_pkts;
319 for (i = 0; i < nb_pkts; i++) {
320 const struct xdp_desc *desc;
325 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
329 offset = xsk_umem__extract_offset(addr);
330 addr = xsk_umem__extract_addr(addr);
332 bufs[i] = (struct rte_mbuf *)
333 xsk_umem__get_data(umem->buffer, addr +
334 umem->mb_pool->header_size);
335 bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
336 rte_pktmbuf_priv_size(umem->mb_pool) -
337 umem->mb_pool->header_size;
339 rte_pktmbuf_pkt_len(bufs[i]) = len;
340 rte_pktmbuf_data_len(bufs[i]) = len;
344 xsk_ring_cons__release(rx, nb_pkts);
345 (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
348 rxq->stats.rx_pkts += nb_pkts;
349 rxq->stats.rx_bytes += rx_bytes;
355 af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
357 struct pkt_rx_queue *rxq = queue;
358 struct xsk_ring_cons *rx = &rxq->rx;
359 struct xsk_umem_info *umem = rxq->umem;
360 struct xsk_ring_prod *fq = &rxq->fq;
362 unsigned long rx_bytes = 0;
364 uint32_t free_thresh = fq->size >> 1;
365 struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
367 if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
368 (void)reserve_fill_queue(umem, nb_pkts, NULL, fq);
370 nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
372 #if defined(XDP_USE_NEED_WAKEUP)
373 if (xsk_ring_prod__needs_wakeup(fq))
374 (void)poll(rxq->fds, 1, 1000);
379 if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
380 /* roll back cached_cons, which was advanced by
381 * xsk_ring_cons__peek()
383 rx->cached_cons -= nb_pkts;
387 for (i = 0; i < nb_pkts; i++) {
388 const struct xdp_desc *desc;
393 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
396 pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);
398 rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
399 rte_ring_enqueue(umem->buf_ring, (void *)addr);
400 rte_pktmbuf_pkt_len(mbufs[i]) = len;
401 rte_pktmbuf_data_len(mbufs[i]) = len;
406 xsk_ring_cons__release(rx, nb_pkts);
409 rxq->stats.rx_pkts += nb_pkts;
410 rxq->stats.rx_bytes += rx_bytes;
417 af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
419 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
420 return af_xdp_rx_zc(queue, bufs, nb_pkts);
422 return af_xdp_rx_cp(queue, bufs, nb_pkts);
427 eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
431 if (likely(nb_pkts <= ETH_AF_XDP_RX_BATCH_SIZE))
432 return af_xdp_rx(queue, bufs, nb_pkts);
434 /* Split the larger batch into smaller batches of at most
435 * ETH_AF_XDP_RX_BATCH_SIZE packets each.
441 n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
442 ret = af_xdp_rx(queue, &bufs[nb_rx], n);
443 nb_rx = (uint16_t)(nb_rx + ret);
444 nb_pkts = (uint16_t)(nb_pkts - ret);
453 pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
458 n = xsk_ring_cons__peek(cq, size, &idx_cq);
460 for (i = 0; i < n; i++) {
462 addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
463 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
464 addr = xsk_umem__extract_addr(addr);
465 rte_pktmbuf_free((struct rte_mbuf *)
466 xsk_umem__get_data(umem->buffer,
467 addr + umem->mb_pool->header_size));
469 rte_ring_enqueue(umem->buf_ring, (void *)addr);
473 xsk_ring_cons__release(cq, n);
477 kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
479 struct xsk_umem_info *umem = txq->umem;
481 pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
483 if (tx_syscall_needed(&txq->tx))
484 while (send(xsk_socket__fd(txq->pair->xsk), NULL,
485 0, MSG_DONTWAIT) < 0) {
486 /* something unexpected */
487 if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
490 /* pull from completion queue to leave more space */
493 XSK_RING_CONS__DEFAULT_NUM_DESCS,
498 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
500 af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
502 struct pkt_tx_queue *txq = queue;
503 struct xsk_umem_info *umem = txq->umem;
504 struct rte_mbuf *mbuf;
505 unsigned long tx_bytes = 0;
509 struct xdp_desc *desc;
510 uint64_t addr, offset;
511 struct xsk_ring_cons *cq = &txq->pair->cq;
512 uint32_t free_thresh = cq->size >> 1;
514 if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
515 pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
517 for (i = 0; i < nb_pkts; i++) {
520 if (mbuf->pool == umem->mb_pool) {
521 if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
523 if (!xsk_ring_prod__reserve(&txq->tx, 1,
527 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
528 desc->len = mbuf->pkt_len;
529 addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
530 umem->mb_pool->header_size;
531 offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
533 umem->mb_pool->header_size;
534 offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
535 desc->addr = addr | offset;
538 struct rte_mbuf *local_mbuf =
539 rte_pktmbuf_alloc(umem->mb_pool);
542 if (local_mbuf == NULL)
545 if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
546 rte_pktmbuf_free(local_mbuf);
550 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
551 desc->len = mbuf->pkt_len;
553 addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
554 umem->mb_pool->header_size;
555 offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
556 (uint64_t)local_mbuf +
557 umem->mb_pool->header_size;
558 pkt = xsk_umem__get_data(umem->buffer, addr + offset);
559 offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
560 desc->addr = addr | offset;
561 rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
563 rte_pktmbuf_free(mbuf);
567 tx_bytes += mbuf->pkt_len;
571 xsk_ring_prod__submit(&txq->tx, count);
574 txq->stats.tx_pkts += count;
575 txq->stats.tx_bytes += tx_bytes;
576 txq->stats.tx_dropped += nb_pkts - count;
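/*
 * Worked example of the descriptor address encoding used above (values are
 * illustrative only): for an mbuf whose UMEM chunk starts at offset 0x2000
 * (the "addr" computed above) and whose packet data begins 0x140 bytes into
 * that chunk, the descriptor is set to
 *     desc->addr = 0x2000 | (0x140ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
 * The receive path recovers the two halves with xsk_umem__extract_addr() and
 * xsk_umem__extract_offset(), as done in af_xdp_rx_zc().
 */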
582 af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
584 struct pkt_tx_queue *txq = queue;
585 struct xsk_umem_info *umem = txq->umem;
586 struct rte_mbuf *mbuf;
587 void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
588 unsigned long tx_bytes = 0;
591 struct xsk_ring_cons *cq = &txq->pair->cq;
593 pull_umem_cq(umem, nb_pkts, cq);
595 nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
600 if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
602 rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
606 for (i = 0; i < nb_pkts; i++) {
607 struct xdp_desc *desc;
610 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
612 desc->len = mbuf->pkt_len;
614 desc->addr = (uint64_t)addrs[i];
615 pkt = xsk_umem__get_data(umem->mz->addr,
617 rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
618 tx_bytes += mbuf->pkt_len;
619 rte_pktmbuf_free(mbuf);
622 xsk_ring_prod__submit(&txq->tx, nb_pkts);
626 txq->stats.tx_pkts += nb_pkts;
627 txq->stats.tx_bytes += tx_bytes;
633 af_xdp_tx_cp_batch(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
637 if (likely(nb_pkts <= ETH_AF_XDP_TX_BATCH_SIZE))
638 return af_xdp_tx_cp(queue, bufs, nb_pkts);
644 /* Split the larger batch into smaller batches of at most
645 * ETH_AF_XDP_TX_BATCH_SIZE packets each.
647 n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
648 ret = af_xdp_tx_cp(queue, &bufs[nb_tx], n);
649 nb_tx = (uint16_t)(nb_tx + ret);
650 nb_pkts = (uint16_t)(nb_pkts - ret);
660 eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
662 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
663 return af_xdp_tx_zc(queue, bufs, nb_pkts);
665 return af_xdp_tx_cp_batch(queue, bufs, nb_pkts);
670 eth_dev_start(struct rte_eth_dev *dev)
672 dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
677 /* This function is called when the port is stopped. */
679 eth_dev_stop(struct rte_eth_dev *dev)
681 dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
685 /* Find ethdev in list */
686 static inline struct internal_list *
687 find_internal_resource(struct pmd_internals *port_int)
690 struct internal_list *list = NULL;
692 if (port_int == NULL)
695 pthread_mutex_lock(&internal_list_lock);
697 TAILQ_FOREACH(list, &internal_list, next) {
698 struct pmd_internals *list_int =
699 list->eth_dev->data->dev_private;
700 if (list_int == port_int) {
706 pthread_mutex_unlock(&internal_list_lock);
715 eth_dev_configure(struct rte_eth_dev *dev)
717 struct pmd_internals *internal = dev->data->dev_private;
719 /* rx/tx must be paired */
720 if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
723 if (internal->shared_umem) {
724 struct internal_list *list = NULL;
725 const char *name = dev->device->name;
727 /* Ensure PMD is not already inserted into the list */
728 list = find_internal_resource(internal);
732 list = rte_zmalloc_socket(name, sizeof(*list), 0,
733 dev->device->numa_node);
738 pthread_mutex_lock(&internal_list_lock);
739 TAILQ_INSERT_TAIL(&internal_list, list, next);
740 pthread_mutex_unlock(&internal_list_lock);
746 #define CLB_VAL_IDX 0
748 eth_monitor_callback(const uint64_t value,
749 const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
751 const uint64_t v = opaque[CLB_VAL_IDX];
752 const uint64_t m = (uint32_t)~0;
754 /* if the value has changed, abort entering power optimized state */
755 return (value & m) == v ? 0 : -1;
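/*
 * Worked example (illustrative values): if the stored opaque value is 5 and
 * the producer index still reads 5 when the condition is evaluated, the
 * callback returns 0 and the core may enter the power-optimized state; once
 * the kernel advances the index, the comparison fails, -1 is returned and
 * entry is aborted.
 */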
759 eth_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
761 struct pkt_rx_queue *rxq = rx_queue;
762 unsigned int *prod = rxq->rx.producer;
763 const uint32_t cur_val = rxq->rx.cached_prod; /* use cached value */
765 /* watch for changes in producer ring */
766 pmc->addr = (void *)prod;
768 /* store current value */
769 pmc->opaque[CLB_VAL_IDX] = cur_val;
770 pmc->fn = eth_monitor_callback;
772 /* AF_XDP producer ring index is 32-bit */
773 pmc->size = sizeof(uint32_t);
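/*
 * Minimal sketch (not part of the driver) of how an application could consume
 * the condition filled in above. The port/queue ids are placeholders and
 * rte_get_tsc_cycles() would additionally require rte_cycles.h.
 */
#if 0   /* example only */
static void example_monitor_rx_queue(uint16_t port_id, uint16_t queue_id)
{
    struct rte_power_monitor_cond pmc;

    /* Let the PMD fill in the address to watch, expected value and callback. */
    if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) != 0)
        return;

    /* Sleep until the RX producer index changes or the TSC deadline passes. */
    rte_power_monitor(&pmc, rte_get_tsc_cycles() + 10000);
}
#endif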
779 eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
781 struct pmd_internals *internals = dev->data->dev_private;
783 dev_info->if_index = internals->if_index;
784 dev_info->max_mac_addrs = 1;
785 dev_info->max_rx_queues = internals->queue_cnt;
786 dev_info->max_tx_queues = internals->queue_cnt;
788 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
789 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
790 dev_info->max_rx_pktlen = getpagesize() -
791 sizeof(struct rte_mempool_objhdr) -
792 sizeof(struct rte_mbuf) -
793 RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
795 dev_info->max_rx_pktlen = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
797 dev_info->max_mtu = dev_info->max_rx_pktlen - ETH_AF_XDP_ETH_OVERHEAD;
799 dev_info->default_rxportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
800 dev_info->default_txportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
801 dev_info->default_rxportconf.nb_queues = 1;
802 dev_info->default_txportconf.nb_queues = 1;
803 dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
804 dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
810 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
812 struct pmd_internals *internals = dev->data->dev_private;
813 struct pmd_process_private *process_private = dev->process_private;
814 struct xdp_statistics xdp_stats;
815 struct pkt_rx_queue *rxq;
816 struct pkt_tx_queue *txq;
820 for (i = 0; i < dev->data->nb_rx_queues; i++) {
821 optlen = sizeof(struct xdp_statistics);
822 rxq = &internals->rx_queues[i];
824 stats->q_ipackets[i] = rxq->stats.rx_pkts;
825 stats->q_ibytes[i] = rxq->stats.rx_bytes;
827 stats->q_opackets[i] = txq->stats.tx_pkts;
828 stats->q_obytes[i] = txq->stats.tx_bytes;
830 stats->ipackets += stats->q_ipackets[i];
831 stats->ibytes += stats->q_ibytes[i];
832 stats->imissed += rxq->stats.rx_dropped;
833 stats->oerrors += txq->stats.tx_dropped;
834 fd = process_private->rxq_xsk_fds[i];
835 ret = fd >= 0 ? getsockopt(fd, SOL_XDP, XDP_STATISTICS,
836 &xdp_stats, &optlen) : -1;
838 AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
841 stats->imissed += xdp_stats.rx_dropped;
843 stats->opackets += stats->q_opackets[i];
844 stats->obytes += stats->q_obytes[i];
851 eth_stats_reset(struct rte_eth_dev *dev)
853 struct pmd_internals *internals = dev->data->dev_private;
856 for (i = 0; i < internals->queue_cnt; i++) {
857 memset(&internals->rx_queues[i].stats, 0,
858 sizeof(struct rx_stats));
859 memset(&internals->tx_queues[i].stats, 0,
860 sizeof(struct tx_stats));
867 remove_xdp_program(struct pmd_internals *internals)
869 uint32_t curr_prog_id = 0;
871 if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
872 XDP_FLAGS_UPDATE_IF_NOEXIST)) {
873 AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
876 bpf_set_link_xdp_fd(internals->if_index, -1,
877 XDP_FLAGS_UPDATE_IF_NOEXIST);
881 xdp_umem_destroy(struct xsk_umem_info *umem)
883 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
884 umem->mb_pool = NULL;
886 rte_memzone_free(umem->mz);
889 rte_ring_free(umem->buf_ring);
890 umem->buf_ring = NULL;
897 eth_dev_close(struct rte_eth_dev *dev)
899 struct pmd_internals *internals = dev->data->dev_private;
900 struct pkt_rx_queue *rxq;
903 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
906 AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
909 for (i = 0; i < internals->queue_cnt; i++) {
910 rxq = &internals->rx_queues[i];
911 if (rxq->umem == NULL)
913 xsk_socket__delete(rxq->xsk);
915 if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE)
917 (void)xsk_umem__delete(rxq->umem->umem);
918 xdp_umem_destroy(rxq->umem);
921 /* free pkt_tx_queue */
927 * The MAC address is not allocated dynamically; setting the pointer to NULL
928 * prevents rte_eth_dev_release_port() from trying to free it.
930 dev->data->mac_addrs = NULL;
932 remove_xdp_program(internals);
934 if (internals->shared_umem) {
935 struct internal_list *list;
937 /* Remove ethdev from list used to track and share UMEMs */
938 list = find_internal_resource(internals);
940 pthread_mutex_lock(&internal_list_lock);
941 TAILQ_REMOVE(&internal_list, list, next);
942 pthread_mutex_unlock(&internal_list_lock);
948 rte_free(dev->process_private);
954 eth_link_update(struct rte_eth_dev *dev __rte_unused,
955 int wait_to_complete __rte_unused)
960 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
961 static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
963 struct rte_mempool_memhdr *memhdr;
964 uintptr_t memhdr_addr, aligned_addr;
966 memhdr = STAILQ_FIRST(&mp->mem_list);
967 memhdr_addr = (uintptr_t)memhdr->addr;
968 aligned_addr = memhdr_addr & ~(getpagesize() - 1);
969 *align = memhdr_addr - aligned_addr;
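/*
 * Worked example (illustrative addresses): if the mempool's first memhdr
 * starts at 0x7f0000001100 and the page size is 4096, the base address handed
 * to the kernel is 0x7f0000001000 and *align is set to 0x100; the UMEM size
 * computed below is increased by that amount so the mapping still covers the
 * last object.
 */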
974 /* Check if the netdev,qid context already exists */
976 ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
977 struct pkt_rx_queue *list_rxq, const char *list_ifname)
981 if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
982 !strncmp(ifname, list_ifname, IFNAMSIZ)) {
983 AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
984 ifname, rxq->xsk_queue_idx);
991 /* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
993 get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
994 struct xsk_umem_info **umem)
996 struct internal_list *list;
997 struct pmd_internals *internals;
999 struct rte_mempool *mb_pool = rxq->mb_pool;
1001 if (mb_pool == NULL)
1004 pthread_mutex_lock(&internal_list_lock);
1006 TAILQ_FOREACH(list, &internal_list, next) {
1007 internals = list->eth_dev->data->dev_private;
1008 for (i = 0; i < internals->queue_cnt; i++) {
1009 struct pkt_rx_queue *list_rxq =
1010 &internals->rx_queues[i];
1011 if (rxq == list_rxq)
1013 if (mb_pool == internals->rx_queues[i].mb_pool) {
1014 if (ctx_exists(rxq, ifname, list_rxq,
1015 internals->if_name)) {
1019 if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
1020 __ATOMIC_ACQUIRE)) {
1021 *umem = internals->rx_queues[i].umem;
1029 pthread_mutex_unlock(&internal_list_lock);
1035 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
1036 struct pkt_rx_queue *rxq)
1038 struct xsk_umem_info *umem = NULL;
1040 struct xsk_umem_config usr_config = {
1041 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS * 2,
1042 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1043 .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
1044 void *base_addr = NULL;
1045 struct rte_mempool *mb_pool = rxq->mb_pool;
1046 uint64_t umem_size, align = 0;
1048 if (internals->shared_umem) {
1049 if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
1053 __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
1055 AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
1056 internals->if_name, rxq->xsk_queue_idx);
1057 __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
1062 usr_config.frame_size =
1063 rte_mempool_calc_obj_size(mb_pool->elt_size,
1064 mb_pool->flags, NULL);
1065 usr_config.frame_headroom = mb_pool->header_size +
1066 sizeof(struct rte_mbuf) +
1067 rte_pktmbuf_priv_size(mb_pool) +
1068 RTE_PKTMBUF_HEADROOM;
1070 umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
1073 AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
1077 umem->mb_pool = mb_pool;
1078 base_addr = (void *)get_base_addr(mb_pool, &align);
1079 umem_size = (uint64_t)mb_pool->populated_size *
1080 (uint64_t)usr_config.frame_size +
1083 ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
1084 &rxq->fq, &rxq->cq, &usr_config);
1086 AF_XDP_LOG(ERR, "Failed to create umem\n");
1089 umem->buffer = base_addr;
1091 if (internals->shared_umem) {
1092 umem->max_xsks = mb_pool->populated_size /
1093 ETH_AF_XDP_NUM_BUFFERS;
1094 AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n",
1095 mb_pool->name, umem->max_xsks);
1098 __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
1104 xdp_umem_destroy(umem);
1109 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
1110 struct pkt_rx_queue *rxq)
1112 struct xsk_umem_info *umem;
1113 const struct rte_memzone *mz;
1114 struct xsk_umem_config usr_config = {
1115 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1116 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1117 .frame_size = ETH_AF_XDP_FRAME_SIZE,
1118 .frame_headroom = 0 };
1119 char ring_name[RTE_RING_NAMESIZE];
1120 char mz_name[RTE_MEMZONE_NAMESIZE];
1124 umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
1126 AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
1130 snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
1131 internals->if_name, rxq->xsk_queue_idx);
1132 umem->buf_ring = rte_ring_create(ring_name,
1133 ETH_AF_XDP_NUM_BUFFERS,
1136 if (umem->buf_ring == NULL) {
1137 AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
1141 for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
1142 rte_ring_enqueue(umem->buf_ring,
1143 (void *)(i * ETH_AF_XDP_FRAME_SIZE));
1145 snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
1146 internals->if_name, rxq->xsk_queue_idx);
1147 mz = rte_memzone_reserve_aligned(mz_name,
1148 ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1149 rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
1152 AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
1156 ret = xsk_umem__create(&umem->umem, mz->addr,
1157 ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1162 AF_XDP_LOG(ERR, "Failed to create umem\n");
1170 xdp_umem_destroy(umem);
1176 load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map)
1179 struct bpf_object *obj;
1181 prog_fd = load_program(prog_path, &obj);
1183 AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
1188 * The loaded program must provide a map of XSKs ("xsks_map") so that
1189 * traffic can be redirected to userspace.
1191 *map = bpf_object__find_map_by_name(obj, "xsks_map");
1193 AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
1197 /* Attach the program to the given network device */
1198 ret = bpf_set_link_xdp_fd(if_index, prog_fd,
1199 XDP_FLAGS_UPDATE_IF_NOEXIST);
1201 AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
1206 AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
1207 prog_path, prog_fd);
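/*
 * Minimal sketch of a custom program that satisfies the xsks_map requirement
 * described above. It is not shipped with this driver; the map size and
 * program/section names are assumptions. It would be built separately (e.g.
 * clang -target bpf, with <linux/bpf.h> and <bpf/bpf_helpers.h>) and passed
 * in via the xdp_prog devarg.
 */
#if 0   /* example only, libbpf BTF-defined map style */
struct {
    __uint(type, BPF_MAP_TYPE_XSKMAP);
    __uint(max_entries, 64);
    __uint(key_size, sizeof(int));
    __uint(value_size, sizeof(int));
} xsks_map SEC(".maps");

SEC("xdp")
int xdp_sock_prog(struct xdp_md *ctx)
{
    /* Redirect to the socket bound to this queue; fall back to XDP_PASS. */
    return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
}
#endif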
1212 /* Configure preferred busy polling through setsockopt(); fall back if unsupported. */
1214 configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
1217 int fd = xsk_socket__fd(rxq->xsk);
1220 ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
1221 (void *)&sock_opt, sizeof(sock_opt));
1223 AF_XDP_LOG(DEBUG, "Failed to set SO_PREFER_BUSY_POLL\n");
1227 sock_opt = ETH_AF_XDP_DFLT_BUSY_TIMEOUT;
1228 ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
1231 AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL\n");
1235 sock_opt = rxq->busy_budget;
1236 ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
1237 (void *)&sock_opt, sizeof(sock_opt));
1239 AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL_BUDGET\n");
1241 AF_XDP_LOG(INFO, "Busy polling budget set to: %u\n",
1246 /* setsockopt failure - attempt to restore xsk to default state and
1247 * proceed without busy polling support.
1250 ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
1253 AF_XDP_LOG(ERR, "Failed to unset SO_BUSY_POLL\n");
1259 ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
1260 (void *)&sock_opt, sizeof(sock_opt));
1262 AF_XDP_LOG(ERR, "Failed to unset SO_PREFER_BUSY_POLL\n");
1267 rxq->busy_budget = 0;
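/*
 * Usage note (from the kernel/DPDK busy-polling documentation, not this
 * file): preferred busy polling is typically combined with NAPI deferral on
 * the kernel side, for example
 *     echo 2      > /sys/class/net/<iface>/napi_defer_hard_irqs
 *     echo 200000 > /sys/class/net/<iface>/gro_flush_timeout
 * and tuned per port through the busy_budget devarg parsed further down.
 */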
1272 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
1275 struct xsk_socket_config cfg;
1276 struct pkt_tx_queue *txq = rxq->pair;
1278 int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS;
1279 struct rte_mbuf *fq_bufs[reserve_size];
1281 rxq->umem = xdp_umem_configure(internals, rxq);
1282 if (rxq->umem == NULL)
1284 txq->umem = rxq->umem;
1286 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1287 ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
1289 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
1294 ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
1296 AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
1300 cfg.rx_size = ring_size;
1301 cfg.tx_size = ring_size;
1302 cfg.libbpf_flags = 0;
1303 cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
1306 #if defined(XDP_USE_NEED_WAKEUP)
1307 cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
1310 if (strnlen(internals->prog_path, PATH_MAX) &&
1311 !internals->custom_prog_configured) {
1312 ret = load_custom_xdp_prog(internals->prog_path,
1313 internals->if_index,
1316 AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
1317 internals->prog_path);
1320 internals->custom_prog_configured = 1;
1321 cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
1324 if (internals->shared_umem)
1325 ret = create_shared_socket(&rxq->xsk, internals->if_name,
1326 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1327 &txq->tx, &rxq->fq, &rxq->cq, &cfg);
1329 ret = xsk_socket__create(&rxq->xsk, internals->if_name,
1330 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1334 AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
1338 /* insert the xsk into the xsks_map */
1339 if (internals->custom_prog_configured) {
1342 fd = xsk_socket__fd(rxq->xsk);
1343 err = bpf_map_update_elem(bpf_map__fd(internals->map),
1344 &rxq->xsk_queue_idx, &fd, 0);
1346 AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n");
1351 if (rxq->busy_budget) {
1352 ret = configure_preferred_busy_poll(rxq);
1354 AF_XDP_LOG(ERR, "Failed to configure busy polling.\n");
1362 xsk_socket__delete(rxq->xsk);
1364 if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0)
1365 xdp_umem_destroy(rxq->umem);
1371 eth_rx_queue_setup(struct rte_eth_dev *dev,
1372 uint16_t rx_queue_id,
1373 uint16_t nb_rx_desc,
1374 unsigned int socket_id __rte_unused,
1375 const struct rte_eth_rxconf *rx_conf __rte_unused,
1376 struct rte_mempool *mb_pool)
1378 struct pmd_internals *internals = dev->data->dev_private;
1379 struct pmd_process_private *process_private = dev->process_private;
1380 struct pkt_rx_queue *rxq;
1383 rxq = &internals->rx_queues[rx_queue_id];
1385 AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
1386 rx_queue_id, rxq->xsk_queue_idx);
1388 #ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
1389 uint32_t buf_size, data_size;
1391 /* Now get the space available for data in the mbuf */
1392 buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1393 RTE_PKTMBUF_HEADROOM;
1394 data_size = ETH_AF_XDP_FRAME_SIZE;
1396 if (data_size > buf_size) {
1397 AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
1398 dev->device->name, data_size, buf_size);
1404 rxq->mb_pool = mb_pool;
1406 if (xsk_configure(internals, rxq, nb_rx_desc)) {
1407 AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
1412 if (!rxq->busy_budget)
1413 AF_XDP_LOG(DEBUG, "Preferred busy polling not enabled\n");
1415 rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
1416 rxq->fds[0].events = POLLIN;
1418 process_private->rxq_xsk_fds[rx_queue_id] = rxq->fds[0].fd;
1420 dev->data->rx_queues[rx_queue_id] = rxq;
1428 eth_tx_queue_setup(struct rte_eth_dev *dev,
1429 uint16_t tx_queue_id,
1430 uint16_t nb_tx_desc __rte_unused,
1431 unsigned int socket_id __rte_unused,
1432 const struct rte_eth_txconf *tx_conf __rte_unused)
1434 struct pmd_internals *internals = dev->data->dev_private;
1435 struct pkt_tx_queue *txq;
1437 txq = &internals->tx_queues[tx_queue_id];
1439 dev->data->tx_queues[tx_queue_id] = txq;
1444 eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1446 struct pmd_internals *internals = dev->data->dev_private;
1447 struct ifreq ifr = { .ifr_mtu = mtu };
1451 s = socket(PF_INET, SOCK_DGRAM, 0);
1455 strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
1456 ret = ioctl(s, SIOCSIFMTU, &ifr);
1459 return (ret < 0) ? -errno : 0;
1463 eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
1469 s = socket(PF_INET, SOCK_DGRAM, 0);
1473 strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1474 if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
1478 ifr.ifr_flags &= mask;
1479 ifr.ifr_flags |= flags;
1480 if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
1490 eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
1492 struct pmd_internals *internals = dev->data->dev_private;
1494 return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
1498 eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
1500 struct pmd_internals *internals = dev->data->dev_private;
1502 return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
1505 static const struct eth_dev_ops ops = {
1506 .dev_start = eth_dev_start,
1507 .dev_stop = eth_dev_stop,
1508 .dev_close = eth_dev_close,
1509 .dev_configure = eth_dev_configure,
1510 .dev_infos_get = eth_dev_info,
1511 .mtu_set = eth_dev_mtu_set,
1512 .promiscuous_enable = eth_dev_promiscuous_enable,
1513 .promiscuous_disable = eth_dev_promiscuous_disable,
1514 .rx_queue_setup = eth_rx_queue_setup,
1515 .tx_queue_setup = eth_tx_queue_setup,
1516 .link_update = eth_link_update,
1517 .stats_get = eth_stats_get,
1518 .stats_reset = eth_stats_reset,
1519 .get_monitor_addr = eth_get_monitor_addr,
1522 /** parse busy_budget argument */
1524 parse_budget_arg(const char *key __rte_unused,
1525 const char *value, void *extra_args)
1527 int *i = (int *)extra_args;
1530 *i = strtol(value, &end, 10);
1531 if (*i < 0 || *i > UINT16_MAX) {
1532 AF_XDP_LOG(ERR, "Invalid busy_budget %i, must be >= 0 and <= %u\n",
1540 /** parse an integer argument */
1542 parse_integer_arg(const char *key __rte_unused,
1543 const char *value, void *extra_args)
1545 int *i = (int *)extra_args;
1548 *i = strtol(value, &end, 10);
1550 AF_XDP_LOG(ERR, "Argument must not be negative.\n");
1557 /** parse name argument */
1559 parse_name_arg(const char *key __rte_unused,
1560 const char *value, void *extra_args)
1562 char *name = extra_args;
1564 if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
1565 AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
1570 strlcpy(name, value, IFNAMSIZ);
1575 /** parse xdp prog argument */
1577 parse_prog_arg(const char *key __rte_unused,
1578 const char *value, void *extra_args)
1580 char *path = extra_args;
1582 if (strnlen(value, PATH_MAX) == PATH_MAX) {
1583 AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
1588 if (access(value, F_OK) != 0) {
1589 AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
1590 value, strerror(errno));
1594 strlcpy(path, value, PATH_MAX);
1600 xdp_get_channels_info(const char *if_name, int *max_queues,
1601 int *combined_queues)
1603 struct ethtool_channels channels;
1607 fd = socket(AF_INET, SOCK_DGRAM, 0);
1611 channels.cmd = ETHTOOL_GCHANNELS;
1612 ifr.ifr_data = (void *)&channels;
1613 strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1614 ret = ioctl(fd, SIOCETHTOOL, &ifr);
1616 if (errno == EOPNOTSUPP) {
1624 if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
1625 /* If the device says it has no channels, then all traffic
1626 * is sent to a single stream, so max queues = 1.
1629 *combined_queues = 1;
1631 *max_queues = channels.max_combined;
1632 *combined_queues = channels.combined_count;
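/*
 * For reference (shell, not part of this file): the same information can be
 * inspected with "ethtool -l <iface>"; its "Combined" rows correspond to the
 * max_combined / combined_count values read through SIOCETHTOOL above.
 */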
1641 parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
1642 int *queue_cnt, int *shared_umem, char *prog_path,
1647 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
1648 &parse_name_arg, if_name);
1652 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
1653 &parse_integer_arg, start_queue);
1657 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
1658 &parse_integer_arg, queue_cnt);
1659 if (ret < 0 || *queue_cnt <= 0) {
1664 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_SHARED_UMEM_ARG,
1665 &parse_integer_arg, shared_umem);
1669 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG,
1670 &parse_prog_arg, prog_path);
1674 ret = rte_kvargs_process(kvlist, ETH_AF_XDP_BUDGET_ARG,
1675 &parse_budget_arg, busy_budget);
1680 rte_kvargs_free(kvlist);
1685 get_iface_info(const char *if_name,
1686 struct rte_ether_addr *eth_addr,
1690 int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
1695 strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1696 if (ioctl(sock, SIOCGIFINDEX, &ifr))
1699 *if_index = ifr.ifr_ifindex;
1701 if (ioctl(sock, SIOCGIFHWADDR, &ifr))
1704 rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
1714 static struct rte_eth_dev *
1715 init_internals(struct rte_vdev_device *dev, const char *if_name,
1716 int start_queue_idx, int queue_cnt, int shared_umem,
1717 const char *prog_path, int busy_budget)
1719 const char *name = rte_vdev_device_name(dev);
1720 const unsigned int numa_node = dev->device.numa_node;
1721 struct pmd_process_private *process_private;
1722 struct pmd_internals *internals;
1723 struct rte_eth_dev *eth_dev;
1727 internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
1728 if (internals == NULL)
1731 internals->start_queue_idx = start_queue_idx;
1732 internals->queue_cnt = queue_cnt;
1733 strlcpy(internals->if_name, if_name, IFNAMSIZ);
1734 strlcpy(internals->prog_path, prog_path, PATH_MAX);
1735 internals->custom_prog_configured = 0;
1737 #ifndef ETH_AF_XDP_SHARED_UMEM
1739 AF_XDP_LOG(ERR, "Shared UMEM feature not available. "
1740 "Check kernel and libbpf version\n");
1741 goto err_free_internals;
1744 internals->shared_umem = shared_umem;
1746 if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
1747 &internals->combined_queue_cnt)) {
1748 AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
1750 goto err_free_internals;
1753 if (queue_cnt > internals->combined_queue_cnt) {
1754 AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
1755 queue_cnt, internals->combined_queue_cnt);
1756 goto err_free_internals;
1759 internals->rx_queues = rte_zmalloc_socket(NULL,
1760 sizeof(struct pkt_rx_queue) * queue_cnt,
1762 if (internals->rx_queues == NULL) {
1763 AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
1764 goto err_free_internals;
1767 internals->tx_queues = rte_zmalloc_socket(NULL,
1768 sizeof(struct pkt_tx_queue) * queue_cnt,
1770 if (internals->tx_queues == NULL) {
1771 AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
1774 for (i = 0; i < queue_cnt; i++) {
1775 internals->tx_queues[i].pair = &internals->rx_queues[i];
1776 internals->rx_queues[i].pair = &internals->tx_queues[i];
1777 internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
1778 internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
1779 internals->rx_queues[i].busy_budget = busy_budget;
1782 ret = get_iface_info(if_name, &internals->eth_addr,
1783 &internals->if_index);
1787 process_private = (struct pmd_process_private *)
1788 rte_zmalloc_socket(name, sizeof(struct pmd_process_private),
1789 RTE_CACHE_LINE_SIZE, numa_node);
1790 if (process_private == NULL) {
1791 AF_XDP_LOG(ERR, "Failed to alloc memory for process private\n");
1795 eth_dev = rte_eth_vdev_allocate(dev, 0);
1796 if (eth_dev == NULL)
1799 eth_dev->data->dev_private = internals;
1800 eth_dev->data->dev_link = pmd_link;
1801 eth_dev->data->mac_addrs = &internals->eth_addr;
1802 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1803 eth_dev->dev_ops = &ops;
1804 eth_dev->rx_pkt_burst = eth_af_xdp_rx;
1805 eth_dev->tx_pkt_burst = eth_af_xdp_tx;
1806 eth_dev->process_private = process_private;
1808 for (i = 0; i < queue_cnt; i++)
1809 process_private->rxq_xsk_fds[i] = -1;
1811 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1812 AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
1818 rte_free(process_private);
1820 rte_free(internals->tx_queues);
1822 rte_free(internals->rx_queues);
1824 rte_free(internals);
1828 /* Secondary process requests rxq fds from primary. */
1830 afxdp_mp_request_fds(const char *name, struct rte_eth_dev *dev)
1832 struct pmd_process_private *process_private = dev->process_private;
1833 struct timespec timeout = {.tv_sec = 1, .tv_nsec = 0};
1834 struct rte_mp_msg request, *reply;
1835 struct rte_mp_reply replies;
1836 struct ipc_hdr *request_param = (struct ipc_hdr *)request.param;
1839 /* Prepare the request */
1840 memset(&request, 0, sizeof(request));
1841 strlcpy(request.name, ETH_AF_XDP_MP_KEY, sizeof(request.name));
1842 strlcpy(request_param->port_name, name,
1843 sizeof(request_param->port_name));
1844 request.len_param = sizeof(*request_param);
1846 /* Send the request and receive the reply */
1847 AF_XDP_LOG(DEBUG, "Sending multi-process IPC request for %s\n", name);
1848 ret = rte_mp_request_sync(&request, &replies, &timeout);
1849 if (ret < 0 || replies.nb_received != 1) {
1850 AF_XDP_LOG(ERR, "Failed to request fds from primary: %d\n",
1854 reply = replies.msgs;
1855 AF_XDP_LOG(DEBUG, "Received multi-process IPC reply for %s\n", name);
1856 if (dev->data->nb_rx_queues != reply->num_fds) {
1857 AF_XDP_LOG(ERR, "Incorrect number of fds received: %d != %d\n",
1858 reply->num_fds, dev->data->nb_rx_queues);
1862 for (i = 0; i < reply->num_fds; i++)
1863 process_private->rxq_xsk_fds[i] = reply->fds[i];
1869 /* Primary process sends rxq fds to secondary. */
1871 afxdp_mp_send_fds(const struct rte_mp_msg *request, const void *peer)
1873 struct rte_eth_dev *dev;
1874 struct pmd_process_private *process_private;
1875 struct rte_mp_msg reply;
1876 const struct ipc_hdr *request_param =
1877 (const struct ipc_hdr *)request->param;
1878 struct ipc_hdr *reply_param =
1879 (struct ipc_hdr *)reply.param;
1880 const char *request_name = request_param->port_name;
1883 AF_XDP_LOG(DEBUG, "Received multi-process IPC request for %s\n",
1886 /* Find the requested port */
1887 dev = rte_eth_dev_get_by_name(request_name);
1889 AF_XDP_LOG(ERR, "Failed to get port id for %s\n", request_name);
1892 process_private = dev->process_private;
1894 /* Populate the reply with the xsk fd for each queue */
1896 if (dev->data->nb_rx_queues > RTE_MP_MAX_FD_NUM) {
1897 AF_XDP_LOG(ERR, "Number of rx queues (%d) exceeds max number of fds (%d)\n",
1898 dev->data->nb_rx_queues, RTE_MP_MAX_FD_NUM);
1902 for (i = 0; i < dev->data->nb_rx_queues; i++)
1903 reply.fds[reply.num_fds++] = process_private->rxq_xsk_fds[i];
1905 /* Send the reply */
1906 strlcpy(reply.name, request->name, sizeof(reply.name));
1907 strlcpy(reply_param->port_name, request_name,
1908 sizeof(reply_param->port_name));
1909 reply.len_param = sizeof(*reply_param);
1910 AF_XDP_LOG(DEBUG, "Sending multi-process IPC reply for %s\n",
1911 reply_param->port_name);
1912 if (rte_mp_reply(&reply, peer) < 0) {
1913 AF_XDP_LOG(ERR, "Failed to reply to multi-process IPC request\n");
1920 rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
1922 struct rte_kvargs *kvlist;
1923 char if_name[IFNAMSIZ] = {'\0'};
1924 int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
1925 int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
1926 int shared_umem = 0;
1927 char prog_path[PATH_MAX] = {'\0'};
1928 int busy_budget = -1, ret;
1929 struct rte_eth_dev *eth_dev = NULL;
1930 const char *name = rte_vdev_device_name(dev);
1932 AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n", name);
1934 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1935 eth_dev = rte_eth_dev_attach_secondary(name);
1936 if (eth_dev == NULL) {
1937 AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
1940 eth_dev->dev_ops = &ops;
1941 eth_dev->device = &dev->device;
1942 eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
1943 eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
1944 eth_dev->process_private = (struct pmd_process_private *)
1945 rte_zmalloc_socket(name,
1946 sizeof(struct pmd_process_private),
1947 RTE_CACHE_LINE_SIZE,
1948 eth_dev->device->numa_node);
1949 if (eth_dev->process_private == NULL) {
1951 "Failed to alloc memory for process private\n");
1955 /* Obtain the xsk fds from the primary process. */
1956 if (afxdp_mp_request_fds(name, eth_dev))
1959 rte_eth_dev_probing_finish(eth_dev);
1963 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1964 if (kvlist == NULL) {
1965 AF_XDP_LOG(ERR, "Invalid kvargs key\n");
1969 if (dev->device.numa_node == SOCKET_ID_ANY)
1970 dev->device.numa_node = rte_socket_id();
1972 if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
1973 &xsk_queue_cnt, &shared_umem, prog_path,
1974 &busy_budget) < 0) {
1975 AF_XDP_LOG(ERR, "Invalid kvargs value\n");
1979 if (strlen(if_name) == 0) {
1980 AF_XDP_LOG(ERR, "Network interface must be specified\n");
1984 busy_budget = busy_budget == -1 ? ETH_AF_XDP_DFLT_BUSY_BUDGET :
1987 eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
1988 xsk_queue_cnt, shared_umem, prog_path,
1990 if (eth_dev == NULL) {
1991 AF_XDP_LOG(ERR, "Failed to init internals\n");
1995 /* Register IPC callback which shares xsk fds from primary to secondary */
1996 if (!afxdp_dev_count) {
1997 ret = rte_mp_action_register(ETH_AF_XDP_MP_KEY, afxdp_mp_send_fds);
1998 if (ret < 0 && rte_errno != ENOTSUP) {
1999 AF_XDP_LOG(ERR, "%s: Failed to register multi-process IPC callback: %s\n",
2000 name, strerror(rte_errno));
2006 rte_eth_dev_probing_finish(eth_dev);
2012 rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
2014 struct rte_eth_dev *eth_dev = NULL;
2016 AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
2022 /* find the ethdev entry */
2023 eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
2024 if (eth_dev == NULL)
2027 eth_dev_close(eth_dev);
2028 if (afxdp_dev_count == 1)
2029 rte_mp_action_unregister(ETH_AF_XDP_MP_KEY);
2031 rte_eth_dev_release_port(eth_dev);
2036 static struct rte_vdev_driver pmd_af_xdp_drv = {
2037 .probe = rte_pmd_af_xdp_probe,
2038 .remove = rte_pmd_af_xdp_remove,
2041 RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
2042 RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
2044 "start_queue=<int> "
2045 "queue_count=<int> "
2046 "shared_umem=<int> "
2047 "xdp_prog=<string> "
2048 "busy_budget=<int>");