/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <poll.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_xdp.h>
#include <linux/if_link.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include "af_xdp_deps.h"
#include <bpf/xsk.h>

#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif
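
/*
 * Two buffer management schemes are compiled in, selected by the kernel
 * headers available at build time:
 *  - with XDP_UMEM_UNALIGNED_CHUNK_FLAG, the umem is laid directly over an
 *    rte_mempool, so rx/tx run zero-copy between the umem and mbufs
 *    (the *_zc paths below);
 *  - otherwise, a dedicated memzone backs the umem and every packet is
 *    copied between umem chunks and mbufs, with free chunk addresses
 *    tracked in an rte_ring (the *_cp paths below).
 * XDP_USE_NEED_WAKEUP additionally enables the need_wakeup protocol, which
 * lets the driver skip kick syscalls unless the kernel asks for a wakeup.
 */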

static int af_xdp_logtype;

#define AF_XDP_LOG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, af_xdp_logtype,	\
		"%s(): " fmt, __func__, ##args)

#define ETH_AF_XDP_FRAME_SIZE		2048
#define ETH_AF_XDP_NUM_BUFFERS		4096
#ifdef XDP_UMEM_UNALIGNED_CHUNK_FLAG
#define ETH_AF_XDP_MBUF_OVERHEAD	128 /* sizeof(struct rte_mbuf) */
#define ETH_AF_XDP_DATA_HEADROOM \
	(ETH_AF_XDP_MBUF_OVERHEAD + RTE_PKTMBUF_HEADROOM)
#else
#define ETH_AF_XDP_DATA_HEADROOM	0
#endif
#define ETH_AF_XDP_DFLT_NUM_DESCS	XSK_RING_CONS__DEFAULT_NUM_DESCS
#define ETH_AF_XDP_DFLT_START_QUEUE_IDX	0
#define ETH_AF_XDP_DFLT_QUEUE_COUNT	1

#define ETH_AF_XDP_RX_BATCH_SIZE	32
#define ETH_AF_XDP_TX_BATCH_SIZE	32
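
/* fq and cq always accompany a single umem; in the zero-copy case the umem
 * memory is borrowed from mb_pool, in the copy case it lives in mz and free
 * chunk addresses are recycled through buf_ring.
 */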
struct xsk_umem_info {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem;
	struct rte_ring *buf_ring;
	const struct rte_memzone *mz;
	struct rte_mempool *mb_pool;
	void *buffer;
};

struct rx_stats {
	uint64_t rx_pkts;
	uint64_t rx_bytes;
	uint64_t rx_dropped;
};

struct pkt_rx_queue {
	struct xsk_ring_cons rx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	struct rte_mempool *mb_pool;

	struct rx_stats stats;

	struct pkt_tx_queue *pair;
	struct pollfd fds[1];
	int xsk_queue_idx;
};

struct tx_stats {
	uint64_t tx_pkts;
	uint64_t tx_bytes;
	uint64_t tx_dropped;
};

struct pkt_tx_queue {
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;

	struct tx_stats stats;

	struct pkt_rx_queue *pair;
	int xsk_queue_idx;
};

struct pmd_internals {
	int if_index;
	char if_name[IFNAMSIZ];
	int start_queue_idx;
	int queue_cnt;
	int max_queue_cnt;
	int combined_queue_cnt;

	struct rte_ether_addr eth_addr;

	struct pkt_rx_queue *rx_queues;
	struct pkt_tx_queue *tx_queues;
};

#define ETH_AF_XDP_IFACE_ARG		"iface"
#define ETH_AF_XDP_START_QUEUE_ARG	"start_queue"
#define ETH_AF_XDP_QUEUE_COUNT_ARG	"queue_count"

static const char * const valid_arguments[] = {
	ETH_AF_XDP_IFACE_ARG,
	ETH_AF_XDP_START_QUEUE_ARG,
	ETH_AF_XDP_QUEUE_COUNT_ARG,
	NULL
};

static const struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG
};
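
/* Fill-queue replenishment: hand buffer addresses to the kernel so it has
 * somewhere to place received frames. The zero-copy variant consumes the
 * mbufs passed in (freeing them on failure); the copy variant pulls chunk
 * addresses from buf_ring and returns them there on failure.
 */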
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline int
reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
		      struct rte_mbuf **bufs)
{
	struct xsk_ring_prod *fq = &umem->fq;
	uint32_t idx;
	uint16_t i;

	if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
		for (i = 0; i < reserve_size; i++)
			rte_pktmbuf_free(bufs[i]);
		AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
		return -1;
	}

	for (i = 0; i < reserve_size; i++) {
		__u64 *fq_addr;
		uint64_t addr;

		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
		addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer;
		*fq_addr = addr;
	}

	xsk_ring_prod__submit(fq, reserve_size);

	return 0;
}
#else
static inline int
reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
		      struct rte_mbuf **bufs __rte_unused)
{
	struct xsk_ring_prod *fq = &umem->fq;
	void *addrs[reserve_size];
	uint32_t idx;
	uint16_t i;

	if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
		    != reserve_size) {
		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
		return -1;
	}

	if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
		AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
		rte_ring_enqueue_bulk(umem->buf_ring, addrs,
				      reserve_size, NULL);
		return -1;
	}

	for (i = 0; i < reserve_size; i++) {
		__u64 *fq_addr;

		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
		*fq_addr = (uint64_t)addrs[i];
	}

	xsk_ring_prod__submit(fq, reserve_size);

	return 0;
}
#endif

static inline int
reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
		   struct rte_mbuf **bufs)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return reserve_fill_queue_zc(umem, reserve_size, bufs);
#else
	return reserve_fill_queue_cp(umem, reserve_size, bufs);
#endif
}
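
/* RX burst. In the zero-copy path the umem chunk *is* the mbuf: the
 * descriptor address is translated back to the mbuf carved out of the umem
 * mempool and only data_off and the lengths are fixed up. In the copy path
 * the payload is memcpy'd into a freshly allocated mbuf and the chunk
 * address is returned to buf_ring for reuse.
 */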
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_rx_queue *rxq = queue;
	struct xsk_ring_cons *rx = &rxq->rx;
	struct xsk_umem_info *umem = rxq->umem;
	uint32_t idx_rx = 0;
	unsigned long rx_bytes = 0;
	int rcvd, i;
	struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];

	/* allocate bufs for fill queue replenishment after rx */
	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
		AF_XDP_LOG(DEBUG,
			"Failed to get enough buffers for fq.\n");
		/* return 0, not -1: the burst API returns a packet count */
		return 0;
	}

	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);

	if (rcvd == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
		if (xsk_ring_prod__needs_wakeup(&umem->fq))
			(void)poll(rxq->fds, 1, 1000);
#endif

		goto out;
	}

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc;
		uint64_t addr;
		uint32_t len;
		uint64_t offset;

		desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		addr = desc->addr;
		len = desc->len;

		offset = xsk_umem__extract_offset(addr);
		addr = xsk_umem__extract_addr(addr);

		bufs[i] = (struct rte_mbuf *)
				xsk_umem__get_data(umem->buffer, addr);
		bufs[i]->data_off = offset - sizeof(struct rte_mbuf);

		rte_pktmbuf_pkt_len(bufs[i]) = len;
		rte_pktmbuf_data_len(bufs[i]) = len;
		rx_bytes += len;
	}

	xsk_ring_cons__release(rx, rcvd);

	(void)reserve_fill_queue(umem, rcvd, fq_bufs);

	/* statistics */
	rxq->stats.rx_pkts += rcvd;
	rxq->stats.rx_bytes += rx_bytes;

out:
	if (rcvd != nb_pkts)
		rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
				     nb_pkts - rcvd);

	return rcvd;
}

static uint16_t
af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_rx_queue *rxq = queue;
	struct xsk_ring_cons *rx = &rxq->rx;
	struct xsk_umem_info *umem = rxq->umem;
	struct xsk_ring_prod *fq = &umem->fq;
	uint32_t idx_rx = 0;
	unsigned long rx_bytes = 0;
	int rcvd, i;
	uint32_t free_thresh = fq->size >> 1;
	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];

	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
		return 0;

	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
	if (rcvd == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
		if (xsk_ring_prod__needs_wakeup(fq))
			(void)poll(rxq->fds, 1, 1000);
#endif

		goto out;
	}

	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc;
		uint64_t addr;
		uint32_t len;
		void *pkt;

		desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		addr = desc->addr;
		len = desc->len;
		pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);

		rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
		rte_ring_enqueue(umem->buf_ring, (void *)addr);
		rte_pktmbuf_pkt_len(mbufs[i]) = len;
		rte_pktmbuf_data_len(mbufs[i]) = len;
		rx_bytes += len;
		bufs[i] = mbufs[i];
	}

	xsk_ring_cons__release(rx, rcvd);

	/* statistics */
	rxq->stats.rx_pkts += rcvd;
	rxq->stats.rx_bytes += rx_bytes;

out:
	if (rcvd != nb_pkts)
		rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
				     nb_pkts - rcvd);

	return rcvd;
}
#endif

static uint16_t
eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return af_xdp_rx_zc(queue, bufs, nb_pkts);
#else
	return af_xdp_rx_cp(queue, bufs, nb_pkts);
#endif
}
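
/* Drain up to `size` entries from the completion queue and recycle the
 * transmitted buffers: back to the mempool (zero-copy) or to buf_ring
 * (copy).
 */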
static void
pull_umem_cq(struct xsk_umem_info *umem, int size)
{
	struct xsk_ring_cons *cq = &umem->cq;
	size_t i, n;
	uint32_t idx_cq = 0;

	n = xsk_ring_cons__peek(cq, size, &idx_cq);

	for (i = 0; i < n; i++) {
		uint64_t addr;

		addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		addr = xsk_umem__extract_addr(addr);
		rte_pktmbuf_free((struct rte_mbuf *)
					xsk_umem__get_data(umem->buffer, addr));
#else
		rte_ring_enqueue(umem->buf_ring, (void *)addr);
#endif
	}

	xsk_ring_cons__release(cq, n);
}

static void
kick_tx(struct pkt_tx_queue *txq)
{
	struct xsk_umem_info *umem = txq->umem;

#if defined(XDP_USE_NEED_WAKEUP)
	if (xsk_ring_prod__needs_wakeup(&txq->tx))
#endif
		while (send(xsk_socket__fd(txq->pair->xsk), NULL,
			    0, MSG_DONTWAIT) < 0) {
			/* something unexpected */
			if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
				break;

			/* pull from completion queue to leave more space */
			if (errno == EAGAIN)
				pull_umem_cq(umem,
					     ETH_AF_XDP_TX_BATCH_SIZE);
		}
#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
	pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE);
#endif
}
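
/* TX burst. In the zero-copy path an mbuf already backed by the umem
 * mempool is transmitted in place; any other mbuf is copied into a freshly
 * allocated umem mbuf and freed. Descriptor addresses carry the intra-chunk
 * offset in their upper bits (XSK_UNALIGNED_BUF_OFFSET_SHIFT).
 */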
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_tx_queue *txq = queue;
	struct xsk_umem_info *umem = txq->umem;
	struct rte_mbuf *mbuf;
	unsigned long tx_bytes = 0;
	int i;
	uint32_t idx_tx;
	uint16_t count = 0;
	struct xdp_desc *desc;
	uint64_t addr, offset;

	pull_umem_cq(umem, nb_pkts);

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];

		if (mbuf->pool == umem->mb_pool) {
			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
				kick_tx(txq);
				goto out;
			}
			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
			desc->len = mbuf->pkt_len;
			addr = (uint64_t)mbuf - (uint64_t)umem->buffer;
			offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
					(uint64_t)mbuf;
			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
			desc->addr = addr | offset;
			tx_bytes += mbuf->pkt_len;
			count++;
		} else {
			struct rte_mbuf *local_mbuf =
					rte_pktmbuf_alloc(umem->mb_pool);
			void *pkt;

			if (local_mbuf == NULL)
				goto out;

			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
				rte_pktmbuf_free(local_mbuf);
				kick_tx(txq);
				goto out;
			}

			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
			desc->len = mbuf->pkt_len;

			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer;
			offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
					(uint64_t)local_mbuf;
			pkt = xsk_umem__get_data(umem->buffer, addr + offset);
			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
			desc->addr = addr | offset;
			rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
					desc->len);
			/* count the bytes before the mbuf is freed */
			tx_bytes += mbuf->pkt_len;
			rte_pktmbuf_free(mbuf);
			count++;
		}
	}

	kick_tx(txq);

out:
	xsk_ring_prod__submit(&txq->tx, count);

	txq->stats.tx_pkts += count;
	txq->stats.tx_bytes += tx_bytes;
	txq->stats.tx_dropped += nb_pkts - count;

	return count;
}

static uint16_t
af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_tx_queue *txq = queue;
	struct xsk_umem_info *umem = txq->umem;
	struct rte_mbuf *mbuf;
	void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
	unsigned long tx_bytes = 0;
	int i;
	uint32_t idx_tx;

	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);

	pull_umem_cq(umem, nb_pkts);

	nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
					nb_pkts, NULL);
	if (nb_pkts == 0)
		return 0;

	if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
		kick_tx(txq);
		rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
		return 0;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct xdp_desc *desc;
		void *pkt;

		desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
		mbuf = bufs[i];
		desc->len = mbuf->pkt_len;

		desc->addr = (uint64_t)addrs[i];
		pkt = xsk_umem__get_data(umem->mz->addr,
					 desc->addr);
		rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	xsk_ring_prod__submit(&txq->tx, nb_pkts);

	kick_tx(txq);

	txq->stats.tx_pkts += nb_pkts;
	txq->stats.tx_bytes += tx_bytes;

	return nb_pkts;
}
#endif

static uint16_t
eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return af_xdp_tx_zc(queue, bufs, nb_pkts);
#else
	return af_xdp_tx_cp(queue, bufs, nb_pkts);
#endif
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

/* This function gets called when the current port gets stopped. */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	/* rx/tx must be paired */
	if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
		return -EINVAL;

	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = ETH_FRAME_LEN;
	dev_info->max_rx_queues = internals->queue_cnt;
	dev_info->max_tx_queues = internals->queue_cnt;

	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;

	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
	dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;

	return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct xdp_statistics xdp_stats;
	struct pkt_rx_queue *rxq;
	struct pkt_tx_queue *txq;
	socklen_t optlen;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		optlen = sizeof(struct xdp_statistics);
		rxq = &internals->rx_queues[i];
		txq = rxq->pair;
		stats->q_ipackets[i] = rxq->stats.rx_pkts;
		stats->q_ibytes[i] = rxq->stats.rx_bytes;

		stats->q_opackets[i] = txq->stats.tx_pkts;
		stats->q_obytes[i] = txq->stats.tx_bytes;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];
		stats->imissed += rxq->stats.rx_dropped;
		stats->oerrors += txq->stats.tx_dropped;
		ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
				XDP_STATISTICS, &xdp_stats, &optlen);
		if (ret != 0) {
			AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
			return -1;
		}
		stats->imissed += xdp_stats.rx_dropped;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
	}

	return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->queue_cnt; i++) {
		memset(&internals->rx_queues[i].stats, 0,
					sizeof(struct rx_stats));
		memset(&internals->tx_queues[i].stats, 0,
					sizeof(struct tx_stats));
	}
}

static void
remove_xdp_program(struct pmd_internals *internals)
{
	uint32_t curr_prog_id = 0;

	if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
				XDP_FLAGS_UPDATE_IF_NOEXIST)) {
		AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
		return;
	}
	bpf_set_link_xdp_fd(internals->if_index, -1,
			XDP_FLAGS_UPDATE_IF_NOEXIST);
}

static void
xdp_umem_destroy(struct xsk_umem_info *umem)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	rte_mempool_free(umem->mb_pool);
	umem->mb_pool = NULL;
#else
	rte_memzone_free(umem->mz);
	umem->mz = NULL;

	rte_ring_free(umem->buf_ring);
	umem->buf_ring = NULL;
#endif

	rte_free(umem);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *rxq;
	int i;

	AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
		rte_socket_id());

	for (i = 0; i < internals->queue_cnt; i++) {
		rxq = &internals->rx_queues[i];
		if (rxq->umem == NULL)
			break;
		xsk_socket__delete(rxq->xsk);
		(void)xsk_umem__delete(rxq->umem->umem);
		xdp_umem_destroy(rxq->umem);
	}

	/* free pkt_tx_queue: each tx queue is paired with an rx queue, so
	 * releasing the two arrays releases every queue pair. The queues are
	 * array elements, so they must not be freed individually.
	 */
	rte_free(internals->tx_queues);
	internals->tx_queues = NULL;
	rte_free(internals->rx_queues);
	internals->rx_queues = NULL;

	/*
	 * MAC is not allocated dynamically; setting it to NULL prevents
	 * rte_eth_dev_release_port() from trying to free it.
	 */
	dev->data->mac_addrs = NULL;

	remove_xdp_program(internals);
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline uint64_t get_base_addr(struct rte_mempool *mp)
{
	struct rte_mempool_memhdr *memhdr;

	memhdr = STAILQ_FIRST(&mp->mem_list);
	return (uint64_t)memhdr->addr & ~(getpagesize() - 1);
}
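
/* Create a umem on top of the mempool's own memory: frame size and headroom
 * are derived from the mempool element layout so that a umem chunk and an
 * mbuf are the same object. get_base_addr() above page-aligns the first
 * memory chunk, as the kernel requires the umem to start on a page boundary.
 */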
static struct xsk_umem_info *
xdp_umem_configure(struct pmd_internals *internals __rte_unused,
		   struct pkt_rx_queue *rxq)
{
	struct xsk_umem_info *umem;
	int ret;
	struct xsk_umem_config usr_config = {
		.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
	void *base_addr = NULL;
	struct rte_mempool *mb_pool = rxq->mb_pool;

	usr_config.frame_size = rte_pktmbuf_data_room_size(mb_pool) +
					ETH_AF_XDP_MBUF_OVERHEAD +
					mb_pool->private_data_size;
	usr_config.frame_headroom = ETH_AF_XDP_DATA_HEADROOM +
					mb_pool->private_data_size;

	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
	if (umem == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate umem info");
		return NULL;
	}

	umem->mb_pool = mb_pool;
	base_addr = (void *)get_base_addr(mb_pool);

	ret = xsk_umem__create(&umem->umem, base_addr,
			       mb_pool->populated_size * usr_config.frame_size,
			       &umem->fq, &umem->cq,
			       &usr_config);
	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create umem");
		goto err;
	}
	umem->buffer = base_addr;

	return umem;

err:
	xdp_umem_destroy(umem);
	return NULL;
}
#else
static struct xsk_umem_info *
xdp_umem_configure(struct pmd_internals *internals,
		   struct pkt_rx_queue *rxq)
{
	struct xsk_umem_info *umem;
	const struct rte_memzone *mz;
	struct xsk_umem_config usr_config = {
		.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.frame_size = ETH_AF_XDP_FRAME_SIZE,
		.frame_headroom = ETH_AF_XDP_DATA_HEADROOM };
	char ring_name[RTE_RING_NAMESIZE];
	char mz_name[RTE_MEMZONE_NAMESIZE];
	int ret;
	uint64_t i;

	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
	if (umem == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate umem info");
		return NULL;
	}

	snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
		 internals->if_name, rxq->xsk_queue_idx);
	umem->buf_ring = rte_ring_create(ring_name,
					 ETH_AF_XDP_NUM_BUFFERS,
					 rte_socket_id(),
					 0x0);
	if (umem->buf_ring == NULL) {
		AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
		goto err;
	}

	for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
		rte_ring_enqueue(umem->buf_ring,
				 (void *)(i * ETH_AF_XDP_FRAME_SIZE +
					  ETH_AF_XDP_DATA_HEADROOM));

	snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
		 internals->if_name, rxq->xsk_queue_idx);
	mz = rte_memzone_reserve_aligned(mz_name,
			ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
			getpagesize());
	if (mz == NULL) {
		AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
		goto err;
	}

	ret = xsk_umem__create(&umem->umem, mz->addr,
			       ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			       &umem->fq, &umem->cq,
			       &usr_config);
	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create umem");
		goto err;
	}
	umem->mz = mz;

	return umem;

err:
	xdp_umem_destroy(umem);
	return NULL;
}
#endif

static int
xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
	      int ring_size)
{
	struct xsk_socket_config cfg;
	struct pkt_tx_queue *txq = rxq->pair;
	int ret = 0;
	int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS / 2;
	struct rte_mbuf *fq_bufs[reserve_size];

	rxq->umem = xdp_umem_configure(internals, rxq);
	if (rxq->umem == NULL)
		return -ENOMEM;
	txq->umem = rxq->umem;

	cfg.rx_size = ring_size;
	cfg.tx_size = ring_size;
	cfg.libbpf_flags = 0;
	cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
	cfg.bind_flags = 0;

#if defined(XDP_USE_NEED_WAKEUP)
	cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
#endif

	ret = xsk_socket__create(&rxq->xsk, internals->if_name,
			rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
			&txq->tx, &cfg);
	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
		goto err;
	}

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) {
		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
		goto err;
	}
#endif
	ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs);
	if (ret) {
		xsk_socket__delete(rxq->xsk);
		AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
		goto err;
	}

	return 0;

err:
	xdp_umem_destroy(rxq->umem);

	return ret;
}
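
/* Per-queue setup: binds an XSK socket to <if_name, xsk_queue_idx> and
 * registers the socket fd used by the need_wakeup poll() in the rx path.
 */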
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *rxq;
	int ret;

	rxq = &internals->rx_queues[rx_queue_id];

	AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
		   rx_queue_id, rxq->xsk_queue_idx);

#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
	uint32_t buf_size, data_size;

	/* Now get the space available for data in the mbuf */
	buf_size = rte_pktmbuf_data_room_size(mb_pool) -
		RTE_PKTMBUF_HEADROOM;
	data_size = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;

	if (data_size > buf_size) {
		AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
			dev->device->name, data_size, buf_size);
		ret = -ENOMEM;
		goto err;
	}
#endif

	rxq->mb_pool = mb_pool;

	if (xsk_configure(internals, rxq, nb_rx_desc)) {
		AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
		ret = -EINVAL;
		goto err;
	}

	rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
	rxq->fds[0].events = POLLIN;

	dev->data->rx_queues[rx_queue_id] = rxq;
	return 0;

err:
	return ret;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_tx_queue *txq;

	txq = &internals->tx_queues[tx_queue_id];

	dev->data->tx_queues[tx_queue_id] = txq;
	return 0;
}

static int
eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int ret;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -EINVAL;

	strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
	ret = ioctl(s, SIOCSIFMTU, &ifr);

	close(s);
	return (ret < 0) ? -errno : 0;
}

static int
eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
{
	struct ifreq ifr;
	int ret = 0;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -errno;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
		ret = -errno;
		goto out;
	}
	ifr.ifr_flags &= mask;
	ifr.ifr_flags |= flags;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
		ret = -errno;
		goto out;
	}
out:
	close(s);
	return ret;
}

static int
eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
}

static int
eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.mtu_set = eth_dev_mtu_set,
	.promiscuous_enable = eth_dev_promiscuous_enable,
	.promiscuous_disable = eth_dev_promiscuous_disable,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

/** parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	int *i = (int *)extra_args;
	char *end;

	*i = strtol(value, &end, 10);
	if (*i < 0) {
		AF_XDP_LOG(ERR, "Argument has to be positive.\n");
		return -EINVAL;
	}

	return 0;
}

/** parse name argument */
static int
parse_name_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	char *name = extra_args;

	if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
		AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
			   value, IFNAMSIZ);
		return -EINVAL;
	}

	strlcpy(name, value, IFNAMSIZ);

	return 0;
}
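
/* Query ethtool channel info to learn how many combined queues the
 * interface exposes; drivers that do not support the ioctl are treated as
 * single-queue devices.
 */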
static int
xdp_get_channels_info(const char *if_name, int *max_queues,
		      int *combined_queues)
{
	struct ethtool_channels channels;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	/* zero the structs so the fields are defined even when the ioctl
	 * is unsupported
	 */
	memset(&channels, 0, sizeof(channels));
	memset(&ifr, 0, sizeof(ifr));

	channels.cmd = ETHTOOL_GCHANNELS;
	ifr.ifr_data = (void *)&channels;
	strncpy(ifr.ifr_name, if_name, IFNAMSIZ);
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret) {
		if (errno == EOPNOTSUPP) {
			ret = 0;
		} else {
			ret = -errno;
			goto out;
		}
	}

	if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		*max_queues = 1;
		*combined_queues = 1;
	} else {
		*max_queues = channels.max_combined;
		*combined_queues = channels.combined_count;
	}

out:
	close(fd);
	return ret;
}

static int
parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
		 int *queue_cnt)
{
	int ret;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
				 &parse_name_arg, if_name);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
				 &parse_integer_arg, start_queue);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
				 &parse_integer_arg, queue_cnt);
	if (ret < 0 || *queue_cnt <= 0) {
		ret = -EINVAL;
		goto free_kvlist;
	}

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
get_iface_info(const char *if_name,
	       struct rte_ether_addr *eth_addr,
	       int *if_index)
{
	struct ifreq ifr;
	int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);

	if (sock < 0)
		return -1;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(sock, SIOCGIFINDEX, &ifr))
		goto error;

	*if_index = ifr.ifr_ifindex;

	if (ioctl(sock, SIOCGIFHWADDR, &ifr))
		goto error;

	rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);

	close(sock);
	return 0;

error:
	close(sock);
	return -1;
}

static struct rte_eth_dev *
init_internals(struct rte_vdev_device *dev, const char *if_name,
	       int start_queue_idx, int queue_cnt)
{
	const char *name = rte_vdev_device_name(dev);
	const unsigned int numa_node = dev->device.numa_node;
	struct pmd_internals *internals;
	struct rte_eth_dev *eth_dev;
	int ret;
	int i;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		return NULL;

	internals->start_queue_idx = start_queue_idx;
	internals->queue_cnt = queue_cnt;
	strlcpy(internals->if_name, if_name, IFNAMSIZ);

	if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
				  &internals->combined_queue_cnt)) {
		AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
				if_name);
		goto err_free_internals;
	}

	if (queue_cnt > internals->combined_queue_cnt) {
		AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
				queue_cnt, internals->combined_queue_cnt);
		goto err_free_internals;
	}

	internals->rx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_rx_queue) * queue_cnt,
					0, numa_node);
	if (internals->rx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
		goto err_free_internals;
	}

	internals->tx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_tx_queue) * queue_cnt,
					0, numa_node);
	if (internals->tx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
		goto err_free_rx;
	}

	for (i = 0; i < queue_cnt; i++) {
		internals->tx_queues[i].pair = &internals->rx_queues[i];
		internals->rx_queues[i].pair = &internals->tx_queues[i];
		internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
		internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
	}

	ret = get_iface_info(if_name, &internals->eth_addr,
			     &internals->if_index);
	if (ret)
		goto err_free_tx;

	eth_dev = rte_eth_vdev_allocate(dev, 0);
	if (eth_dev == NULL)
		goto err_free_tx;

	eth_dev->data->dev_private = internals;
	eth_dev->data->dev_link = pmd_link;
	eth_dev->data->mac_addrs = &internals->eth_addr;
	eth_dev->dev_ops = &ops;
	eth_dev->rx_pkt_burst = eth_af_xdp_rx;
	eth_dev->tx_pkt_burst = eth_af_xdp_tx;
	/* Let rte_eth_dev_close() release the port resources. */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
#endif

	return eth_dev;

err_free_tx:
	rte_free(internals->tx_queues);
err_free_rx:
	rte_free(internals->rx_queues);
err_free_internals:
	rte_free(internals);
	return NULL;
}

static int
rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist;
	char if_name[IFNAMSIZ] = {'\0'};
	int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
	int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
	struct rte_eth_dev *eth_dev = NULL;
	const char *name;

	AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
		rte_vdev_device_name(dev));

	name = rte_vdev_device_name(dev);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
		strlen(rte_vdev_device_args(dev)) == 0) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
			return -EINVAL;
		}
		eth_dev->dev_ops = &ops;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL) {
		AF_XDP_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
			     &xsk_queue_cnt) < 0) {
		AF_XDP_LOG(ERR, "Invalid kvargs value\n");
		return -EINVAL;
	}

	if (strlen(if_name) == 0) {
		AF_XDP_LOG(ERR, "Network interface must be specified\n");
		return -EINVAL;
	}

	eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
					xsk_queue_cnt);
	if (eth_dev == NULL) {
		AF_XDP_LOG(ERR, "Failed to init internals\n");
		return -1;
	}

	rte_eth_dev_probing_finish(eth_dev);

	return 0;
}

static int
rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
		rte_socket_id());

	if (dev == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_af_xdp_drv = {
	.probe = rte_pmd_af_xdp_probe,
	.remove = rte_pmd_af_xdp_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
			      "iface=<string> "
			      "start_queue=<int> "
			      "queue_count=<int> ");

RTE_INIT(af_xdp_init_log)
{
	af_xdp_logtype = rte_log_register("pmd.net.af_xdp");
	if (af_xdp_logtype >= 0)
		rte_log_set_level(af_xdp_logtype, RTE_LOG_NOTICE);
}