/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation.
 */
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <poll.h>
#include <pthread.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_xdp.h>
#include <linux/if_link.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include "af_xdp_deps.h"
#include <bpf/bpf.h>

#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_dev.h>
#include <rte_ether.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_spinlock.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif
RTE_LOG_REGISTER(af_xdp_logtype, pmd.net.af_xdp, NOTICE);

#define AF_XDP_LOG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, af_xdp_logtype,	\
		"%s(): " fmt, __func__, ##args)
#define ETH_AF_XDP_FRAME_SIZE		2048
#define ETH_AF_XDP_NUM_BUFFERS		4096
#define ETH_AF_XDP_DFLT_NUM_DESCS	XSK_RING_CONS__DEFAULT_NUM_DESCS
#define ETH_AF_XDP_DFLT_START_QUEUE_IDX	0
#define ETH_AF_XDP_DFLT_QUEUE_COUNT	1

#define ETH_AF_XDP_RX_BATCH_SIZE	32
#define ETH_AF_XDP_TX_BATCH_SIZE	32
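/*
 * The PMD is compiled in one of two modes, keyed off the availability of
 * XDP_UMEM_UNALIGNED_CHUNK_FLAG in the kernel headers:
 *  - zero-copy ("zc"): the UMEM is registered directly on top of the rx
 *    queue's mempool, so packets land in mbufs with no copy;
 *  - copy ("cp"): a separate memzone backs the UMEM, every packet is copied
 *    between UMEM frames and mbufs, and free frame addresses are recycled
 *    through a rte_ring.
 * The *_zc/*_cp function pairs below implement the same operation for the
 * respective mode.
 */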
struct xsk_umem_info {
	struct xsk_umem *umem;
	struct rte_ring *buf_ring;
	const struct rte_memzone *mz;
	struct rte_mempool *mb_pool;
	void *buffer;
	uint8_t refcnt;
	uint32_t max_xsks;
};

struct rx_stats {
	uint64_t rx_pkts;
	uint64_t rx_bytes;
	uint64_t rx_dropped;
};

struct pkt_rx_queue {
	struct xsk_ring_cons rx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	struct rte_mempool *mb_pool;

	struct rx_stats stats;

	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;

	struct pkt_tx_queue *pair;
	struct pollfd fds[1];
	int xsk_queue_idx;
};

struct tx_stats {
	uint64_t tx_pkts;
	uint64_t tx_bytes;
	uint64_t tx_dropped;
};

struct pkt_tx_queue {
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;

	struct tx_stats stats;

	struct pkt_rx_queue *pair;
	int xsk_queue_idx;
};

struct pmd_internals {
	int if_index;
	char if_name[IFNAMSIZ];
	int start_queue_idx;
	int queue_cnt;
	int max_queue_cnt;
	int combined_queue_cnt;
	int shared_umem;

	struct rte_ether_addr eth_addr;

	struct pkt_rx_queue *rx_queues;
	struct pkt_tx_queue *tx_queues;
};
#define ETH_AF_XDP_IFACE_ARG			"iface"
#define ETH_AF_XDP_START_QUEUE_ARG		"start_queue"
#define ETH_AF_XDP_QUEUE_COUNT_ARG		"queue_count"
#define ETH_AF_XDP_SHARED_UMEM_ARG		"shared_umem"

static const char * const valid_arguments[] = {
	ETH_AF_XDP_IFACE_ARG,
	ETH_AF_XDP_START_QUEUE_ARG,
	ETH_AF_XDP_QUEUE_COUNT_ARG,
	ETH_AF_XDP_SHARED_UMEM_ARG,
	NULL
};
static const struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG
};
/* List which tracks PMDs to facilitate sharing UMEMs across them. */
struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline int
reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
		      struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
{
	uint32_t idx;
	uint16_t i;

	if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
		for (i = 0; i < reserve_size; i++)
			rte_pktmbuf_free(bufs[i]);
		AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
		return -1;
	}

	for (i = 0; i < reserve_size; i++) {
		__u64 *fq_addr;
		uint64_t addr;

		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
		addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
				umem->mb_pool->header_size;
		*fq_addr = addr;
	}

	xsk_ring_prod__submit(fq, reserve_size);

	return 0;
}
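/*
 * Note on the zero-copy address arithmetic above: the UMEM is registered
 * over the whole mempool, with frame_headroom covering the mempool object
 * header, the rte_mbuf struct, its private area and RTE_PKTMBUF_HEADROOM
 * (see xdp_umem_configure() below). A fill-queue address is therefore the
 * offset of a mempool object inside the UMEM, and the kernel writes packet
 * data into that object's mbuf data room.
 */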
#else
static inline int
reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
		      struct rte_mbuf **bufs __rte_unused,
		      struct xsk_ring_prod *fq)
{
	void *addrs[reserve_size];
	uint32_t idx;
	uint16_t i;

	if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
		    != reserve_size) {
		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
		return -1;
	}

	if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
		AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
		rte_ring_enqueue_bulk(umem->buf_ring, addrs,
				reserve_size, NULL);
		return -1;
	}

	for (i = 0; i < reserve_size; i++) {
		__u64 *fq_addr;

		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
		*fq_addr = (uint64_t)addrs[i];
	}

	xsk_ring_prod__submit(fq, reserve_size);

	return 0;
}
#endif
static inline int
reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
		   struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return reserve_fill_queue_zc(umem, reserve_size, bufs, fq);
#else
	return reserve_fill_queue_cp(umem, reserve_size, bufs, fq);
#endif
}
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_rx_queue *rxq = queue;
	struct xsk_ring_cons *rx = &rxq->rx;
	struct xsk_ring_prod *fq = &rxq->fq;
	struct xsk_umem_info *umem = rxq->umem;
	uint32_t idx_rx = 0;
	unsigned long rx_bytes = 0;
	int rcvd, i;
	struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];

	/* allocate bufs for fill queue replenishment after rx */
	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
		AF_XDP_LOG(DEBUG,
			"Failed to get enough buffers for fq.\n");
		return 0;
	}

	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);

	if (rcvd == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
		if (xsk_ring_prod__needs_wakeup(fq))
			(void)poll(rxq->fds, 1, 1000);
#endif

		goto out;
	}

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc;
		uint64_t addr;
		uint32_t len;
		uint64_t offset;

		desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		addr = desc->addr;
		len = desc->len;

		offset = xsk_umem__extract_offset(addr);
		addr = xsk_umem__extract_addr(addr);

		bufs[i] = (struct rte_mbuf *)
				xsk_umem__get_data(umem->buffer, addr +
					umem->mb_pool->header_size);
		bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
			rte_pktmbuf_priv_size(umem->mb_pool) -
			umem->mb_pool->header_size;

		rte_pktmbuf_pkt_len(bufs[i]) = len;
		rte_pktmbuf_data_len(bufs[i]) = len;
		rx_bytes += len;
	}

	xsk_ring_cons__release(rx, rcvd);

	(void)reserve_fill_queue(umem, rcvd, fq_bufs, fq);

	/* statistics */
	rxq->stats.rx_pkts += rcvd;
	rxq->stats.rx_bytes += rx_bytes;

out:
	if (rcvd != nb_pkts)
		rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
				     nb_pkts - rcvd);

	return rcvd;
}
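/*
 * In zero-copy rx the mbufs handed to the application are reconstructed in
 * place from UMEM frames: the rx descriptor's address is split into its
 * base (xsk_umem__extract_addr) and offset (xsk_umem__extract_offset)
 * parts, the base locates the mbuf header inside the UMEM and the offset
 * becomes the mbuf's data_off. The freshly allocated fq_bufs then refill
 * the fill queue, so the mempool as a whole cycles between the PMD and the
 * kernel.
 */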
#else
static uint16_t
af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_rx_queue *rxq = queue;
	struct xsk_ring_cons *rx = &rxq->rx;
	struct xsk_umem_info *umem = rxq->umem;
	struct xsk_ring_prod *fq = &rxq->fq;
	uint32_t idx_rx = 0;
	unsigned long rx_bytes = 0;
	int rcvd, i;
	uint32_t free_thresh = fq->size >> 1;
	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];

	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE,
					 NULL, fq);

	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
		return 0;

	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
	if (rcvd == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
		if (xsk_ring_prod__needs_wakeup(fq))
			(void)poll(rxq->fds, 1, 1000);
#endif

		goto out;
	}

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc;
		uint64_t addr;
		uint32_t len;
		void *pkt;

		desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		addr = desc->addr;
		len = desc->len;
		pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);

		rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
		rte_ring_enqueue(umem->buf_ring, (void *)addr);
		rte_pktmbuf_pkt_len(mbufs[i]) = len;
		rte_pktmbuf_data_len(mbufs[i]) = len;
		rx_bytes += len;
		bufs[i] = mbufs[i];
	}

	xsk_ring_cons__release(rx, rcvd);

	/* statistics */
	rxq->stats.rx_pkts += rcvd;
	rxq->stats.rx_bytes += rx_bytes;

out:
	if (rcvd != nb_pkts)
		rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
				     nb_pkts - rcvd);

	return rcvd;
}
#endif
static uint16_t
eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return af_xdp_rx_zc(queue, bufs, nb_pkts);
#else
	return af_xdp_rx_cp(queue, bufs, nb_pkts);
#endif
}
static void
pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
{
	size_t i, n;
	uint32_t idx_cq = 0;

	n = xsk_ring_cons__peek(cq, size, &idx_cq);

	for (i = 0; i < n; i++) {
		uint64_t addr;
		addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		addr = xsk_umem__extract_addr(addr);
		rte_pktmbuf_free((struct rte_mbuf *)
					xsk_umem__get_data(umem->buffer,
					addr + umem->mb_pool->header_size));
#else
		rte_ring_enqueue(umem->buf_ring, (void *)addr);
#endif
	}

	xsk_ring_cons__release(cq, n);
}
static void
kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
{
	struct xsk_umem_info *umem = txq->umem;

	pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);

#if defined(XDP_USE_NEED_WAKEUP)
	if (xsk_ring_prod__needs_wakeup(&txq->tx))
#endif
		while (send(xsk_socket__fd(txq->pair->xsk), NULL,
			    0, MSG_DONTWAIT) < 0) {
			/* something unexpected */
			if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
				break;

			/* pull from completion queue to leave more space */
			if (errno == EAGAIN)
				pull_umem_cq(umem,
					     XSK_RING_CONS__DEFAULT_NUM_DESCS,
					     cq);
		}
}
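/*
 * With XDP_USE_NEED_WAKEUP the kernel only has to be kicked (via the
 * zero-length send() above) when it has parked the tx path and set the
 * wakeup flag on the ring, which saves a syscall per burst in the common
 * case; without that kernel feature every kick_tx() issues the send().
 */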
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_tx_queue *txq = queue;
	struct xsk_umem_info *umem = txq->umem;
	struct rte_mbuf *mbuf;
	unsigned long tx_bytes = 0;
	int i;
	uint32_t idx_tx;
	uint16_t count = 0;
	struct xdp_desc *desc;
	uint64_t addr, offset;
	struct xsk_ring_cons *cq = &txq->pair->cq;
	uint32_t free_thresh = cq->size >> 1;

	if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
		pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];

		if (mbuf->pool == umem->mb_pool) {
			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
				kick_tx(txq, cq);
				if (!xsk_ring_prod__reserve(&txq->tx, 1,
							    &idx_tx))
					goto out;
			}
			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
			desc->len = mbuf->pkt_len;
			addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
					umem->mb_pool->header_size;
			offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
					(uint64_t)mbuf +
					umem->mb_pool->header_size;
			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
			desc->addr = addr | offset;
			count++;
		} else {
			struct rte_mbuf *local_mbuf =
					rte_pktmbuf_alloc(umem->mb_pool);
			void *pkt;

			if (local_mbuf == NULL)
				goto out;

			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
				rte_pktmbuf_free(local_mbuf);
				kick_tx(txq, cq);
				goto out;
			}

			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
			desc->len = mbuf->pkt_len;

			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
					umem->mb_pool->header_size;
			offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
					(uint64_t)local_mbuf +
					umem->mb_pool->header_size;
			pkt = xsk_umem__get_data(umem->buffer, addr + offset);
			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
			desc->addr = addr | offset;
			rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
					desc->len);
			rte_pktmbuf_free(mbuf);
			count++;
		}

		/* use desc->len rather than mbuf->pkt_len: in the copy
		 * branch the original mbuf has already been freed.
		 */
		tx_bytes += desc->len;
	}

	kick_tx(txq, cq);

out:
	xsk_ring_prod__submit(&txq->tx, count);

	txq->stats.tx_pkts += count;
	txq->stats.tx_bytes += tx_bytes;
	txq->stats.tx_dropped += nb_pkts - count;

	return count;
}
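/*
 * Two tx paths coexist above: mbufs that already come from the UMEM's
 * mempool are transmitted in place by pointing the descriptor at them,
 * while mbufs from any other pool are staged through a freshly allocated
 * UMEM mbuf and copied. The descriptor address packs the frame base and
 * the intra-frame offset using XSK_UNALIGNED_BUF_OFFSET_SHIFT, as required
 * for unaligned-chunk UMEMs.
 */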
#else
static uint16_t
af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_tx_queue *txq = queue;
	struct xsk_umem_info *umem = txq->umem;
	struct rte_mbuf *mbuf;
	void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
	unsigned long tx_bytes = 0;
	int i;
	uint32_t idx_tx;
	struct xsk_ring_cons *cq = &txq->pair->cq;

	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);

	pull_umem_cq(umem, nb_pkts, cq);

	nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
					nb_pkts, NULL);
	if (nb_pkts == 0)
		return 0;

	if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
		kick_tx(txq, cq);
		rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
		return 0;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct xdp_desc *desc;
		void *pkt;

		desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
		mbuf = bufs[i];
		desc->len = mbuf->pkt_len;

		desc->addr = (uint64_t)addrs[i];
		pkt = xsk_umem__get_data(umem->mz->addr,
					 desc->addr);
		rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	xsk_ring_prod__submit(&txq->tx, nb_pkts);

	kick_tx(txq, cq);

	txq->stats.tx_pkts += nb_pkts;
	txq->stats.tx_bytes += tx_bytes;

	return nb_pkts;
}
#endif
static uint16_t
eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return af_xdp_tx_zc(queue, bufs, nb_pkts);
#else
	return af_xdp_tx_cp(queue, bufs, nb_pkts);
#endif
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

/* This function gets called when the current port gets stopped. */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
/* Find ethdev in list */
static inline struct internal_list *
find_internal_resource(struct pmd_internals *port_int)
{
	int found = 0;
	struct internal_list *list = NULL;

	if (port_int == NULL)
		return NULL;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		struct pmd_internals *list_int =
				list->eth_dev->data->dev_private;
		if (list_int == port_int) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}
/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
static inline struct xsk_umem_info *
get_shared_umem(struct pkt_rx_queue *rxq) {
	struct internal_list *list;
	struct pmd_internals *internals;
	int i = 0;
	struct rte_mempool *mb_pool = rxq->mb_pool;

	if (mb_pool == NULL)
		return NULL;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		internals = list->eth_dev->data->dev_private;
		for (i = 0; i < internals->queue_cnt; i++) {
			struct pkt_rx_queue *list_rxq =
						&internals->rx_queues[i];
			if (rxq == list_rxq)
				continue;
			if (mb_pool == internals->rx_queues[i].mb_pool) {
				if (__atomic_load_n(
					&internals->rx_queues[i].umem->refcnt,
							__ATOMIC_ACQUIRE)) {
					pthread_mutex_unlock(
							&internal_list_lock);
					return internals->rx_queues[i].umem;
				}
			}
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	return NULL;
}
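/*
 * UMEM sharing is keyed purely on the mempool pointer: two queues (possibly
 * on different ports) that were configured with the same mb_pool reuse one
 * UMEM, with refcnt tracking the number of attached xsk sockets.
 */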
static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	struct pmd_internals *internal = dev->data->dev_private;

	/* rx/tx must be paired */
	if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
		return -EINVAL;

	if (internal->shared_umem) {
		struct internal_list *list = NULL;
		const char *name = dev->device->name;

		/* Ensure PMD is not already inserted into the list */
		list = find_internal_resource(internal);
		if (list)
			return 0;

		list = rte_zmalloc_socket(name, sizeof(*list), 0,
					dev->device->numa_node);
		if (list == NULL)
			return -1;

		list->eth_dev = dev;
		pthread_mutex_lock(&internal_list_lock);
		TAILQ_INSERT_TAIL(&internal_list, list, next);
		pthread_mutex_unlock(&internal_list_lock);
	}

	return 0;
}
static int
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = ETH_FRAME_LEN;
	dev_info->max_rx_queues = internals->queue_cnt;
	dev_info->max_tx_queues = internals->queue_cnt;

	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	dev_info->max_mtu = getpagesize() -
				sizeof(struct rte_mempool_objhdr) -
				sizeof(struct rte_mbuf) -
				RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
#else
	dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
#endif

	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
	dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;

	return 0;
}
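/*
 * In zero-copy mode a whole UMEM chunk (one mempool object) must fit in a
 * page, so the MTU ceiling is whatever is left of the page once the object
 * header, the mbuf struct and the two headrooms are subtracted. As a rough
 * worked example on 4 KB pages with a 128-byte RTE_PKTMBUF_HEADROOM and
 * the kernel's 256-byte XDP_PACKET_HEADROOM, a bit over 3.5 KB remains for
 * the frame (exact figures depend on the build's struct sizes).
 */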
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct xdp_statistics xdp_stats;
	struct pkt_rx_queue *rxq;
	struct pkt_tx_queue *txq;
	socklen_t optlen;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		optlen = sizeof(struct xdp_statistics);
		rxq = &internals->rx_queues[i];
		txq = rxq->pair;
		stats->q_ipackets[i] = rxq->stats.rx_pkts;
		stats->q_ibytes[i] = rxq->stats.rx_bytes;

		stats->q_opackets[i] = txq->stats.tx_pkts;
		stats->q_obytes[i] = txq->stats.tx_bytes;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];
		stats->imissed += rxq->stats.rx_dropped;
		stats->oerrors += txq->stats.tx_dropped;
		ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
				XDP_STATISTICS, &xdp_stats, &optlen);
		if (ret != 0) {
			AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
			return -1;
		}
		stats->imissed += xdp_stats.rx_dropped;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
	}

	return 0;
}
static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->queue_cnt; i++) {
		memset(&internals->rx_queues[i].stats, 0,
					sizeof(struct rx_stats));
		memset(&internals->tx_queues[i].stats, 0,
					sizeof(struct tx_stats));
	}

	return 0;
}
static void
remove_xdp_program(struct pmd_internals *internals)
{
	uint32_t curr_prog_id = 0;

	if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
				XDP_FLAGS_UPDATE_IF_NOEXIST)) {
		AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
		return;
	}
	bpf_set_link_xdp_fd(internals->if_index, -1,
			XDP_FLAGS_UPDATE_IF_NOEXIST);
}
static void
xdp_umem_destroy(struct xsk_umem_info *umem)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	umem->mb_pool = NULL;
#else
	rte_memzone_free(umem->mz);
	umem->mz = NULL;

	rte_ring_free(umem->buf_ring);
	umem->buf_ring = NULL;
#endif

	rte_free(umem);
}
static void
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *rxq;
	int i;

	AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
		rte_socket_id());

	for (i = 0; i < internals->queue_cnt; i++) {
		rxq = &internals->rx_queues[i];
		if (rxq->umem == NULL)
			break;
		xsk_socket__delete(rxq->xsk);

		if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE)
				== 0) {
			(void)xsk_umem__delete(rxq->umem->umem);
			xdp_umem_destroy(rxq->umem);
		}
	}

	/* free pkt_tx_queue and pkt_rx_queue arrays; they were allocated as
	 * single blocks in init_internals(), so they must be freed as such
	 * rather than per element.
	 */
	rte_free(internals->tx_queues);
	internals->tx_queues = NULL;
	rte_free(internals->rx_queues);
	internals->rx_queues = NULL;

	/*
	 * MAC is not allocated dynamically, setting it to NULL would prevent
	 * from releasing it in rte_eth_dev_release_port.
	 */
	dev->data->mac_addrs = NULL;

	remove_xdp_program(internals);

	if (internals->shared_umem) {
		struct internal_list *list;

		/* Remove ethdev from list used to track and share UMEMs */
		list = find_internal_resource(internals);
		if (list) {
			pthread_mutex_lock(&internal_list_lock);
			TAILQ_REMOVE(&internal_list, list, next);
			pthread_mutex_unlock(&internal_list_lock);
			rte_free(list);
		}
	}
}
static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline uint64_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
{
	struct rte_mempool_memhdr *memhdr;
	uint64_t memhdr_addr, aligned_addr;

	memhdr = STAILQ_FIRST(&mp->mem_list);
	memhdr_addr = (uint64_t)memhdr->addr;
	aligned_addr = memhdr_addr & ~(getpagesize() - 1);
	*align = memhdr_addr - aligned_addr;

	return aligned_addr;
}
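/*
 * Example: if the mempool's first memory chunk starts at 0x7f0000001100
 * with 4 KB pages, get_base_addr() returns 0x7f0000001000 and sets *align
 * to 0x100. xsk_umem__create() requires a page-aligned base address, so
 * the alignment slack is simply added to the UMEM size below.
 */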
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
				  struct pkt_rx_queue *rxq)
{
	struct xsk_umem_info *umem = NULL;
	int ret;
	struct xsk_umem_config usr_config = {
		.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS * 2,
		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
	void *base_addr = NULL;
	struct rte_mempool *mb_pool = rxq->mb_pool;
	uint64_t umem_size, align = 0;

	if (internals->shared_umem) {
		umem = get_shared_umem(rxq);
		if (umem != NULL &&
			__atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
					umem->max_xsks) {
			AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
					internals->if_name, rxq->xsk_queue_idx);
			__atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
		}
	}

	if (umem == NULL) {
		usr_config.frame_size =
			rte_mempool_calc_obj_size(mb_pool->elt_size,
						  mb_pool->flags, NULL);
		usr_config.frame_headroom = mb_pool->header_size +
						sizeof(struct rte_mbuf) +
						rte_pktmbuf_priv_size(mb_pool) +
						RTE_PKTMBUF_HEADROOM;

		umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
					  rte_socket_id());
		if (umem == NULL) {
			AF_XDP_LOG(ERR, "Failed to allocate umem info");
			return NULL;
		}

		umem->mb_pool = mb_pool;
		base_addr = (void *)get_base_addr(mb_pool, &align);
		umem_size = mb_pool->populated_size * usr_config.frame_size +
				align;

		ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
				&rxq->fq, &rxq->cq, &usr_config);
		if (ret) {
			AF_XDP_LOG(ERR, "Failed to create umem");
			goto err;
		}
		umem->buffer = base_addr;

		if (internals->shared_umem) {
			umem->max_xsks = mb_pool->populated_size /
						ETH_AF_XDP_NUM_BUFFERS;
			AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n",
						mb_pool->name, umem->max_xsks);
		}

		__atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
	}

	return umem;

err:
	xdp_umem_destroy(umem);
	return NULL;
}
#else
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
				  struct pkt_rx_queue *rxq)
{
	struct xsk_umem_info *umem;
	const struct rte_memzone *mz;
	struct xsk_umem_config usr_config = {
		.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.frame_size = ETH_AF_XDP_FRAME_SIZE,
		.frame_headroom = 0 };
	char ring_name[RTE_RING_NAMESIZE];
	char mz_name[RTE_MEMZONE_NAMESIZE];
	int ret;
	uint64_t i;

	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
	if (umem == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate umem info");
		return NULL;
	}

	snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
		       internals->if_name, rxq->xsk_queue_idx);
	umem->buf_ring = rte_ring_create(ring_name,
					 ETH_AF_XDP_NUM_BUFFERS,
					 rte_socket_id(),
					 0x0);
	if (umem->buf_ring == NULL) {
		AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
		goto err;
	}

	for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
		rte_ring_enqueue(umem->buf_ring,
				 (void *)(i * ETH_AF_XDP_FRAME_SIZE));

	snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
		       internals->if_name, rxq->xsk_queue_idx);
	mz = rte_memzone_reserve_aligned(mz_name,
			ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
			getpagesize());
	if (mz == NULL) {
		AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
		goto err;
	}
	umem->mz = mz;

	ret = xsk_umem__create(&umem->umem, mz->addr,
			       ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			       &rxq->fq, &rxq->cq,
			       &usr_config);
	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create umem");
		goto err;
	}

	return umem;

err:
	xdp_umem_destroy(umem);
	return NULL;
}
#endif
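/*
 * xsk_configure() below calls create_shared_socket() when shared UMEM is
 * requested. The wrapper here is a minimal sketch of the elided upstream
 * helper, assuming ETH_AF_XDP_SHARED_UMEM is defined (e.g. by
 * af_xdp_deps.h or the build system) when libbpf provides
 * xsk_socket__create_shared() (libbpf >= v0.2); otherwise it fails so the
 * shared_umem devarg is rejected at queue-setup time.
 */
#ifdef ETH_AF_XDP_SHARED_UMEM
static int
create_shared_socket(struct xsk_socket **xsk_ptr,
			const char *ifname,
			__u32 queue_idx, struct xsk_umem *umem,
			struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
			struct xsk_ring_prod *fill, struct xsk_ring_cons *comp,
			const struct xsk_socket_config *config)
{
	return xsk_socket__create_shared(xsk_ptr, ifname, queue_idx, umem,
						rx, tx, fill, comp, config);
}
#else
static int
create_shared_socket(struct xsk_socket **xsk_ptr __rte_unused,
			const char *ifname __rte_unused,
			__u32 queue_idx __rte_unused,
			struct xsk_umem *umem __rte_unused,
			struct xsk_ring_cons *rx __rte_unused,
			struct xsk_ring_prod *tx __rte_unused,
			struct xsk_ring_prod *fill __rte_unused,
			struct xsk_ring_cons *comp __rte_unused,
			const struct xsk_socket_config *config __rte_unused)
{
	return -1;
}
#endif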
static int
xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
	      int ring_size)
{
	struct xsk_socket_config cfg;
	struct pkt_tx_queue *txq = rxq->pair;
	int ret = 0;
	int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS;
	struct rte_mbuf *fq_bufs[reserve_size];

	rxq->umem = xdp_umem_configure(internals, rxq);
	if (rxq->umem == NULL)
		return -ENOMEM;
	txq->umem = rxq->umem;

	cfg.rx_size = ring_size;
	cfg.tx_size = ring_size;
	cfg.libbpf_flags = 0;
	cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
	cfg.bind_flags = 0;

#if defined(XDP_USE_NEED_WAKEUP)
	cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
#endif

	if (internals->shared_umem)
		ret = create_shared_socket(&rxq->xsk, internals->if_name,
				rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
				&txq->tx, &rxq->fq, &rxq->cq, &cfg);
	else
		ret = xsk_socket__create(&rxq->xsk, internals->if_name,
				rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
				&txq->tx, &cfg);

	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
		goto err;
	}

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) {
		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
		ret = -ENOMEM;
		goto err;
	}
#endif
	ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
	if (ret) {
		xsk_socket__delete(rxq->xsk);
		AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
		goto err;
	}

	return 0;

err:
	if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0)
		xdp_umem_destroy(rxq->umem);

	return ret;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *rxq;
	int ret;

	rxq = &internals->rx_queues[rx_queue_id];

	AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
		   rx_queue_id, rxq->xsk_queue_idx);

#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
	uint32_t buf_size, data_size;

	/* Now get the space available for data in the mbuf */
	buf_size = rte_pktmbuf_data_room_size(mb_pool) -
		RTE_PKTMBUF_HEADROOM;
	data_size = ETH_AF_XDP_FRAME_SIZE;

	if (data_size > buf_size) {
		AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
			dev->device->name, data_size, buf_size);
		ret = -ENOMEM;
		goto err;
	}
#endif

	rxq->mb_pool = mb_pool;

	if (xsk_configure(internals, rxq, nb_rx_desc)) {
		AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
		ret = -EINVAL;
		goto err;
	}

	rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
	rxq->fds[0].events = POLLIN;

	dev->data->rx_queues[rx_queue_id] = rxq;
	return 0;

err:
	return ret;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_tx_queue *txq;

	txq = &internals->tx_queues[tx_queue_id];

	dev->data->tx_queues[tx_queue_id] = txq;
	return 0;
}
static int
eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int ret;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -EINVAL;

	strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
	ret = ioctl(s, SIOCSIFMTU, &ifr);
	close(s);

	return (ret < 0) ? -errno : 0;
}
static int
eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
{
	struct ifreq ifr;
	int ret = 0;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -errno;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
		ret = -errno;
		goto out;
	}
	ifr.ifr_flags &= mask;
	ifr.ifr_flags |= flags;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
		ret = -errno;
		goto out;
	}
out:
	close(s);
	return ret;
}
static int
eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
}

static int
eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.mtu_set = eth_dev_mtu_set,
	.promiscuous_enable = eth_dev_promiscuous_enable,
	.promiscuous_disable = eth_dev_promiscuous_disable,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
/** parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	int *i = (int *)extra_args;
	char *end;

	*i = strtol(value, &end, 10);
	if (*i < 0) {
		AF_XDP_LOG(ERR, "Argument has to be positive.\n");
		return -EINVAL;
	}

	return 0;
}
/** parse name argument */
static int
parse_name_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	char *name = extra_args;

	if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
		AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
			   value, IFNAMSIZ);
		return -EINVAL;
	}

	strlcpy(name, value, IFNAMSIZ);

	return 0;
}
static int
xdp_get_channels_info(const char *if_name, int *max_queues,
		      int *combined_queues)
{
	struct ethtool_channels channels;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	channels.cmd = ETHTOOL_GCHANNELS;
	ifr.ifr_data = (void *)&channels;
	strncpy(ifr.ifr_name, if_name, IFNAMSIZ);
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret) {
		if (errno == EOPNOTSUPP) {
			ret = 0;
		} else {
			ret = -errno;
			goto out;
		}
	}

	if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		*max_queues = 1;
		*combined_queues = 1;
	} else {
		*max_queues = channels.max_combined;
		*combined_queues = channels.combined_count;
	}

out:
	close(fd);
	return ret;
}
static int
parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
		 int *queue_cnt, int *shared_umem)
{
	int ret;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
				 &parse_name_arg, if_name);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
				 &parse_integer_arg, start_queue);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
				 &parse_integer_arg, queue_cnt);
	if (ret < 0 || *queue_cnt <= 0) {
		ret = -EINVAL;
		goto free_kvlist;
	}

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_SHARED_UMEM_ARG,
				 &parse_integer_arg, shared_umem);
	if (ret < 0)
		goto free_kvlist;

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
get_iface_info(const char *if_name,
	       struct rte_ether_addr *eth_addr,
	       int *if_index)
{
	struct ifreq ifr;
	int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);

	if (sock < 0)
		return -1;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(sock, SIOCGIFINDEX, &ifr))
		goto error;

	*if_index = ifr.ifr_ifindex;

	if (ioctl(sock, SIOCGIFHWADDR, &ifr))
		goto error;

	rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);

	close(sock);
	return 0;

error:
	close(sock);
	return -1;
}
static struct rte_eth_dev *
init_internals(struct rte_vdev_device *dev, const char *if_name,
	       int start_queue_idx, int queue_cnt, int shared_umem)
{
	const char *name = rte_vdev_device_name(dev);
	const unsigned int numa_node = dev->device.numa_node;
	struct pmd_internals *internals;
	struct rte_eth_dev *eth_dev;
	int ret;
	int i;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		return NULL;

	internals->start_queue_idx = start_queue_idx;
	internals->queue_cnt = queue_cnt;
	strlcpy(internals->if_name, if_name, IFNAMSIZ);

#ifndef ETH_AF_XDP_SHARED_UMEM
	if (shared_umem) {
		AF_XDP_LOG(ERR, "Shared UMEM feature not available. "
				"Check kernel and libbpf version\n");
		goto err_free_internals;
	}
#endif
	internals->shared_umem = shared_umem;

	if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
				  &internals->combined_queue_cnt)) {
		AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
			   if_name);
		goto err_free_internals;
	}

	if (queue_cnt > internals->combined_queue_cnt) {
		AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
			   queue_cnt, internals->combined_queue_cnt);
		goto err_free_internals;
	}

	internals->rx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_rx_queue) * queue_cnt,
					0, numa_node);
	if (internals->rx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
		goto err_free_internals;
	}

	internals->tx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_tx_queue) * queue_cnt,
					0, numa_node);
	if (internals->tx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
		goto err_free_rx;
	}
	for (i = 0; i < queue_cnt; i++) {
		internals->tx_queues[i].pair = &internals->rx_queues[i];
		internals->rx_queues[i].pair = &internals->tx_queues[i];
		internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
		internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
	}

	ret = get_iface_info(if_name, &internals->eth_addr,
			     &internals->if_index);
	if (ret)
		goto err_free_tx;

	eth_dev = rte_eth_vdev_allocate(dev, 0);
	if (eth_dev == NULL)
		goto err_free_tx;

	eth_dev->data->dev_private = internals;
	eth_dev->data->dev_link = pmd_link;
	eth_dev->data->mac_addrs = &internals->eth_addr;
	eth_dev->dev_ops = &ops;
	eth_dev->rx_pkt_burst = eth_af_xdp_rx;
	eth_dev->tx_pkt_burst = eth_af_xdp_tx;
	/* Let rte_eth_dev_close() release the port resources. */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
#endif

	return eth_dev;

err_free_tx:
	rte_free(internals->tx_queues);
err_free_rx:
	rte_free(internals->rx_queues);
err_free_internals:
	rte_free(internals);
	return NULL;
}
static int
rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist;
	char if_name[IFNAMSIZ] = {'\0'};
	int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
	int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
	int shared_umem = 0;
	struct rte_eth_dev *eth_dev = NULL;
	const char *name;

	AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
		rte_vdev_device_name(dev));

	name = rte_vdev_device_name(dev);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
		strlen(rte_vdev_device_args(dev)) == 0) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
			return -EINVAL;
		}
		eth_dev->dev_ops = &ops;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL) {
		AF_XDP_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
			     &xsk_queue_cnt, &shared_umem) < 0) {
		AF_XDP_LOG(ERR, "Invalid kvargs value\n");
		return -EINVAL;
	}

	if (strlen(if_name) == 0) {
		AF_XDP_LOG(ERR, "Network interface must be specified\n");
		return -EINVAL;
	}

	eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
				 xsk_queue_cnt, shared_umem);
	if (eth_dev == NULL) {
		AF_XDP_LOG(ERR, "Failed to init internals\n");
		return -1;
	}

	rte_eth_dev_probing_finish(eth_dev);

	return 0;
}
static int
rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
		rte_socket_id());

	if (dev == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0;

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_af_xdp_drv = {
	.probe = rte_pmd_af_xdp_probe,
	.remove = rte_pmd_af_xdp_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
			      "iface=<string> "
			      "start_queue=<int> "
			      "queue_count=<int> "
			      "shared_umem=<int> ");