/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <poll.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_xdp.h>
#include <linux/if_link.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include "af_xdp_deps.h"
#include <bpf/xsk.h>

#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_ether.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

static int af_xdp_logtype;
#define AF_XDP_LOG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, af_xdp_logtype,	\
		"%s(): " fmt, __func__, ##args)
#define ETH_AF_XDP_FRAME_SIZE		2048
#define ETH_AF_XDP_NUM_BUFFERS		4096
#ifdef XDP_UMEM_UNALIGNED_CHUNK_FLAG
#define ETH_AF_XDP_MBUF_OVERHEAD	128 /* sizeof(struct rte_mbuf) */
#define ETH_AF_XDP_DATA_HEADROOM \
	(ETH_AF_XDP_MBUF_OVERHEAD + RTE_PKTMBUF_HEADROOM)
#else
#define ETH_AF_XDP_DATA_HEADROOM	0
#endif
#define ETH_AF_XDP_DFLT_NUM_DESCS	XSK_RING_CONS__DEFAULT_NUM_DESCS
#define ETH_AF_XDP_DFLT_START_QUEUE_IDX	0
#define ETH_AF_XDP_DFLT_QUEUE_COUNT	1

#define ETH_AF_XDP_RX_BATCH_SIZE	32
#define ETH_AF_XDP_TX_BATCH_SIZE	32
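/* A quick sizing sanity check (illustrative, not part of the driver): in the
 * copy-mode layout, the umem is carved into fixed 2 KB frames, so the shared
 * buffer area comes out to
 *
 *	ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE = 4096 * 2048 = 8 MiB,
 *
 * which is exactly the size passed to rte_memzone_reserve_aligned() in
 * xdp_umem_configure() further down.
 */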
struct xsk_umem_info {
	struct xsk_ring_prod fq; /* fill queue: buffers handed to the kernel for rx */
	struct xsk_ring_cons cq; /* completion queue: tx buffers returned by the kernel */
	struct xsk_umem *umem;
	struct rte_ring *buf_ring; /* copy mode: pool of free umem frame addresses */
	const struct rte_memzone *mz; /* copy mode: memory backing the umem */
	struct rte_mempool *mb_pool; /* zero-copy mode: mempool backing the umem */
	void *buffer; /* base virtual address of the umem area */
};

struct rx_stats {
	uint64_t rx_pkts;
	uint64_t rx_bytes;
	uint64_t rx_dropped;
};

struct pkt_rx_queue {
	struct xsk_ring_cons rx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	struct rte_mempool *mb_pool;

	struct rx_stats stats;

	struct pkt_tx_queue *pair;
	struct pollfd fds[1];
	int xsk_queue_idx;
};

struct tx_stats {
	uint64_t tx_pkts;
	uint64_t tx_bytes;
	uint64_t tx_dropped;
};

struct pkt_tx_queue {
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;

	struct tx_stats stats;

	struct pkt_rx_queue *pair;
	int xsk_queue_idx;
};

struct pmd_internals {
	int if_index;
	char if_name[IFNAMSIZ];
	int start_queue_idx;
	int queue_cnt;
	int max_queue_cnt;
	int combined_queue_cnt;

	struct rte_ether_addr eth_addr;

	struct pkt_rx_queue *rx_queues;
	struct pkt_tx_queue *tx_queues;
};
#define ETH_AF_XDP_IFACE_ARG			"iface"
#define ETH_AF_XDP_START_QUEUE_ARG		"start_queue"
#define ETH_AF_XDP_QUEUE_COUNT_ARG		"queue_count"

static const char * const valid_arguments[] = {
	ETH_AF_XDP_IFACE_ARG,
	ETH_AF_XDP_START_QUEUE_ARG,
	ETH_AF_XDP_QUEUE_COUNT_ARG,
	NULL
};
static const struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG
};
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline int
reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
		      struct rte_mbuf **bufs)
{
	struct xsk_ring_prod *fq = &umem->fq;
	uint32_t idx;
	uint16_t i;

	if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
		for (i = 0; i < reserve_size; i++)
			rte_pktmbuf_free(bufs[i]);
		AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
		return -1;
	}

	for (i = 0; i < reserve_size; i++) {
		__u64 *fq_addr;
		uint64_t addr;

		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
		/* convert the mbuf's virtual address into a umem-relative one */
		addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
				umem->mb_pool->header_size;
		*fq_addr = addr;
	}

	xsk_ring_prod__submit(fq, reserve_size);

	return 0;
}
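/* Worked example of the fill-address arithmetic above (illustrative numbers
 * only): if umem->buffer is 0x100000000, an mbuf sits at 0x100002000 and the
 * mempool object header is 64 bytes, the address handed to the kernel is
 * 0x100002000 - 0x100000000 - 64 = 0x1fc0, i.e. a umem-relative offset that
 * points at the mbuf's backing object rather than a process virtual address.
 * The kernel returns the same relative address on the rx ring later.
 */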
#else
static inline int
reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
		      struct rte_mbuf **bufs __rte_unused)
{
	struct xsk_ring_prod *fq = &umem->fq;
	void *addrs[reserve_size];
	uint32_t idx;
	uint16_t i;

	if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
		    != reserve_size) {
		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
		return -1;
	}

	if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
		AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
		rte_ring_enqueue_bulk(umem->buf_ring, addrs,
				      reserve_size, NULL);
		return -1;
	}

	for (i = 0; i < reserve_size; i++) {
		__u64 *fq_addr;

		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
		*fq_addr = (uint64_t)addrs[i];
	}

	xsk_ring_prod__submit(fq, reserve_size);

	return 0;
}
#endif
static inline int
reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
		   struct rte_mbuf **bufs)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return reserve_fill_queue_zc(umem, reserve_size, bufs);
#else
	return reserve_fill_queue_cp(umem, reserve_size, bufs);
#endif
}
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_rx_queue *rxq = queue;
	struct xsk_ring_cons *rx = &rxq->rx;
	struct xsk_umem_info *umem = rxq->umem;
	uint32_t idx_rx = 0;
	unsigned long rx_bytes = 0;
	int rcvd, i;
	struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];

	/* allocate bufs for fill queue replenishment after rx */
	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
		AF_XDP_LOG(DEBUG,
			"Failed to get enough buffers for fq.\n");
		return 0;
	}

	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
	if (rcvd == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
		if (xsk_ring_prod__needs_wakeup(&umem->fq))
			(void)poll(rxq->fds, 1, 1000);
#endif
		goto out;
	}

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc;
		uint64_t addr;
		uint32_t len;
		uint64_t offset;

		desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		addr = desc->addr;
		len = desc->len;

		offset = xsk_umem__extract_offset(addr);
		addr = xsk_umem__extract_addr(addr);

		bufs[i] = (struct rte_mbuf *)
				xsk_umem__get_data(umem->buffer, addr +
					umem->mb_pool->header_size);
		bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
				rte_pktmbuf_priv_size(umem->mb_pool) -
				umem->mb_pool->header_size;

		rte_pktmbuf_pkt_len(bufs[i]) = len;
		rte_pktmbuf_data_len(bufs[i]) = len;
		rx_bytes += len;
	}

	xsk_ring_cons__release(rx, rcvd);

	(void)reserve_fill_queue(umem, rcvd, fq_bufs);

	/* statistics */
	rxq->stats.rx_pkts += rcvd;
	rxq->stats.rx_bytes += rx_bytes;

out:
	if (rcvd != nb_pkts)
		rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
				     nb_pkts - rcvd);

	return rcvd;
}
#else
static uint16_t
af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_rx_queue *rxq = queue;
	struct xsk_ring_cons *rx = &rxq->rx;
	struct xsk_umem_info *umem = rxq->umem;
	struct xsk_ring_prod *fq = &umem->fq;
	uint32_t idx_rx = 0;
	unsigned long rx_bytes = 0;
	int rcvd, i;
	uint32_t free_thresh = fq->size >> 1;
	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];

	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
		return 0;

	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
	if (rcvd == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
		if (xsk_ring_prod__needs_wakeup(fq))
			(void)poll(rxq->fds, 1, 1000);
#endif
		goto out;
	}

	/* replenish the fill queue once it drops below half capacity */
	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc;
		uint64_t addr;
		uint32_t len;
		void *pkt;

		desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		addr = desc->addr;
		len = desc->len;
		pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);

		rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
		rte_ring_enqueue(umem->buf_ring, (void *)addr);
		rte_pktmbuf_pkt_len(mbufs[i]) = len;
		rte_pktmbuf_data_len(mbufs[i]) = len;
		rx_bytes += len;
		bufs[i] = mbufs[i];
	}

	xsk_ring_cons__release(rx, rcvd);

	/* statistics */
	rxq->stats.rx_pkts += rcvd;
	rxq->stats.rx_bytes += rx_bytes;

out:
	if (rcvd != nb_pkts)
		rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
				     nb_pkts - rcvd);

	return rcvd;
}
#endif
static uint16_t
eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return af_xdp_rx_zc(queue, bufs, nb_pkts);
#else
	return af_xdp_rx_cp(queue, bufs, nb_pkts);
#endif
}
static void
pull_umem_cq(struct xsk_umem_info *umem, int size)
{
	struct xsk_ring_cons *cq = &umem->cq;
	size_t i, n;
	uint32_t idx_cq = 0;

	n = xsk_ring_cons__peek(cq, size, &idx_cq);

	for (i = 0; i < n; i++) {
		uint64_t addr;

		addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		addr = xsk_umem__extract_addr(addr);
		rte_pktmbuf_free((struct rte_mbuf *)
					xsk_umem__get_data(umem->buffer,
					addr + umem->mb_pool->header_size));
#else
		rte_ring_enqueue(umem->buf_ring, (void *)addr);
#endif
	}

	xsk_ring_cons__release(cq, n);
}
static void
kick_tx(struct pkt_tx_queue *txq)
{
	struct xsk_umem_info *umem = txq->umem;

#if defined(XDP_USE_NEED_WAKEUP)
	if (xsk_ring_prod__needs_wakeup(&txq->tx))
#endif
		while (send(xsk_socket__fd(txq->pair->xsk), NULL,
			    0, MSG_DONTWAIT) < 0) {
			/* something unexpected */
			if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
				break;

			/* pull from completion queue to leave more space */
			if (errno == EAGAIN)
				pull_umem_cq(umem,
					     ETH_AF_XDP_TX_BATCH_SIZE);
		}
#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
	pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE);
#endif
}
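/* Design note: with XDP_USE_NEED_WAKEUP the send() syscall is only issued
 * when the kernel has set the wakeup flag on the tx ring, avoiding a syscall
 * per burst in the common case; without the flag every call to kick_tx()
 * ends up in send(). EBUSY, EAGAIN and EINTR are treated as transient and
 * retried, with EAGAIN additionally draining the completion queue to make
 * room for the pending descriptors.
 */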
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_tx_queue *txq = queue;
	struct xsk_umem_info *umem = txq->umem;
	struct rte_mbuf *mbuf;
	unsigned long tx_bytes = 0;
	int i;
	uint32_t idx_tx;
	uint16_t count = 0;
	struct xdp_desc *desc;
	uint64_t addr, offset;

	pull_umem_cq(umem, nb_pkts);

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];

		if (mbuf->pool == umem->mb_pool) {
			/* mbuf lives inside the umem: transmit it in place */
			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
				kick_tx(txq);
				goto out;
			}
			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
			desc->len = mbuf->pkt_len;
			addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
					umem->mb_pool->header_size;
			offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
					(uint64_t)mbuf +
					umem->mb_pool->header_size;
			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
			desc->addr = addr | offset;
			tx_bytes += mbuf->pkt_len;
			count++;
		} else {
			/* foreign mempool: copy into a umem-backed mbuf */
			struct rte_mbuf *local_mbuf =
					rte_pktmbuf_alloc(umem->mb_pool);
			void *pkt;

			if (local_mbuf == NULL)
				goto out;

			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
				rte_pktmbuf_free(local_mbuf);
				kick_tx(txq);
				goto out;
			}

			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
			desc->len = mbuf->pkt_len;

			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
					umem->mb_pool->header_size;
			offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
					(uint64_t)local_mbuf +
					umem->mb_pool->header_size;
			pkt = xsk_umem__get_data(umem->buffer, addr + offset);
			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
			desc->addr = addr | offset;
			rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
					desc->len);
			/* account the bytes before the mbuf is freed */
			tx_bytes += mbuf->pkt_len;
			rte_pktmbuf_free(mbuf);
			count++;
		}
	}

	kick_tx(txq);

out:
	xsk_ring_prod__submit(&txq->tx, count);

	txq->stats.tx_pkts += count;
	txq->stats.tx_bytes += tx_bytes;
	txq->stats.tx_dropped += nb_pkts - count;

	return count;
}
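/* Worked example of the unaligned-chunk descriptor encoding used above
 * (illustrative numbers only): with XSK_UNALIGNED_BUF_OFFSET_SHIFT == 48, a
 * umem-relative base address of 0x1fc0 and a data offset of 0x180 pack as
 *
 *	desc->addr = (0x180 << 48) | 0x1fc0
 *
 * so the kernel sees the chunk base in the low 48 bits and the payload
 * offset in the high 16 bits, which is what lets mbufs that are not
 * frame-aligned be transmitted without a copy.
 */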
#else
static uint16_t
af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_tx_queue *txq = queue;
	struct xsk_umem_info *umem = txq->umem;
	struct rte_mbuf *mbuf;
	void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
	unsigned long tx_bytes = 0;
	int i;
	uint32_t idx_tx;

	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);

	pull_umem_cq(umem, nb_pkts);

	nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
					nb_pkts, NULL);
	if (nb_pkts == 0)
		return 0;

	if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
		kick_tx(txq);
		rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
		return 0;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct xdp_desc *desc;
		void *pkt;

		desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
		mbuf = bufs[i];
		desc->len = mbuf->pkt_len;

		desc->addr = (uint64_t)addrs[i];
		pkt = xsk_umem__get_data(umem->mz->addr,
					 desc->addr);
		rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	xsk_ring_prod__submit(&txq->tx, nb_pkts);

	kick_tx(txq);

	txq->stats.tx_pkts += nb_pkts;
	txq->stats.tx_bytes += tx_bytes;

	return nb_pkts;
}
#endif
static uint16_t
eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return af_xdp_tx_zc(queue, bufs, nb_pkts);
#else
	return af_xdp_tx_cp(queue, bufs, nb_pkts);
#endif
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

/* This function gets called when the current port gets stopped. */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	/* rx/tx must be paired */
	if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
		return -EINVAL;

	return 0;
}
static int
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = ETH_FRAME_LEN;
	dev_info->max_rx_queues = internals->queue_cnt;
	dev_info->max_tx_queues = internals->queue_cnt;

	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;

	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
	dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;

	return 0;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct xdp_statistics xdp_stats;
	struct pkt_rx_queue *rxq;
	struct pkt_tx_queue *txq;
	socklen_t optlen;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		optlen = sizeof(struct xdp_statistics);
		rxq = &internals->rx_queues[i];
		txq = rxq->pair;
		stats->q_ipackets[i] = rxq->stats.rx_pkts;
		stats->q_ibytes[i] = rxq->stats.rx_bytes;

		stats->q_opackets[i] = txq->stats.tx_pkts;
		stats->q_obytes[i] = txq->stats.tx_bytes;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];
		stats->imissed += rxq->stats.rx_dropped;
		stats->oerrors += txq->stats.tx_dropped;
		ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
				XDP_STATISTICS, &xdp_stats, &optlen);
		if (ret != 0) {
			AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
			return -1;
		}
		stats->imissed += xdp_stats.rx_dropped;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
	}

	return 0;
}
static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->queue_cnt; i++) {
		memset(&internals->rx_queues[i].stats, 0,
		       sizeof(struct rx_stats));
		memset(&internals->tx_queues[i].stats, 0,
		       sizeof(struct tx_stats));
	}

	return 0;
}
static void
remove_xdp_program(struct pmd_internals *internals)
{
	uint32_t curr_prog_id = 0;

	if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
				XDP_FLAGS_UPDATE_IF_NOEXIST)) {
		AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
		return;
	}
	bpf_set_link_xdp_fd(internals->if_index, -1,
			XDP_FLAGS_UPDATE_IF_NOEXIST);
}
static void
xdp_umem_destroy(struct xsk_umem_info *umem)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	rte_mempool_free(umem->mb_pool);
	umem->mb_pool = NULL;
#else
	rte_memzone_free(umem->mz);
	umem->mz = NULL;

	rte_ring_free(umem->buf_ring);
	umem->buf_ring = NULL;
#endif

	rte_free(umem);
}
static void
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *rxq;
	int i;

	AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
		rte_socket_id());

	for (i = 0; i < internals->queue_cnt; i++) {
		rxq = &internals->rx_queues[i];
		if (rxq->umem == NULL)
			break;
		xsk_socket__delete(rxq->xsk);
		(void)xsk_umem__delete(rxq->umem->umem);
		xdp_umem_destroy(rxq->umem);
	}

	/* free pkt_tx_queue and pkt_rx_queue arrays */
	rte_free(internals->tx_queues);
	rte_free(internals->rx_queues);

	/*
	 * The MAC address is not allocated dynamically; setting it to NULL
	 * prevents rte_eth_dev_release_port() from trying to free it.
	 */
	dev->data->mac_addrs = NULL;

	remove_xdp_program(internals);
}
static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline uint64_t get_base_addr(struct rte_mempool *mp)
{
	struct rte_mempool_memhdr *memhdr;

	memhdr = STAILQ_FIRST(&mp->mem_list);
	return (uint64_t)memhdr->addr & ~(getpagesize() - 1);
}
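/* Example of the alignment above (illustrative addresses): with 4 KB pages,
 * ~(getpagesize() - 1) masks off the low 12 bits, so a mempool whose first
 * memhdr starts at 0x7f1234567abc yields a umem base of 0x7f1234567000.
 * The umem area must be page aligned, hence the mask rather than the raw
 * memhdr address.
 */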
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals __rte_unused,
				  struct pkt_rx_queue *rxq)
{
	struct xsk_umem_info *umem;
	int ret;
	struct xsk_umem_config usr_config = {
		.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
	void *base_addr = NULL;
	struct rte_mempool *mb_pool = rxq->mb_pool;

	usr_config.frame_size = rte_mempool_calc_obj_size(mb_pool->elt_size,
							  mb_pool->flags,
							  NULL);
	usr_config.frame_headroom = mb_pool->header_size +
					sizeof(struct rte_mbuf) +
					rte_pktmbuf_priv_size(mb_pool) +
					RTE_PKTMBUF_HEADROOM;

	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
	if (umem == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate umem info");
		return NULL;
	}

	umem->mb_pool = mb_pool;
	base_addr = (void *)get_base_addr(mb_pool);

	ret = xsk_umem__create(&umem->umem, base_addr,
			       mb_pool->populated_size * usr_config.frame_size,
			       &umem->fq, &umem->cq,
			       &usr_config);
	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create umem");
		goto err;
	}
	umem->buffer = base_addr;

	return umem;

err:
	xdp_umem_destroy(umem);
	return NULL;
}
#else
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
				  struct pkt_rx_queue *rxq)
{
	struct xsk_umem_info *umem;
	const struct rte_memzone *mz;
	struct xsk_umem_config usr_config = {
		.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.frame_size = ETH_AF_XDP_FRAME_SIZE,
		.frame_headroom = ETH_AF_XDP_DATA_HEADROOM };
	char ring_name[RTE_RING_NAMESIZE];
	char mz_name[RTE_MEMZONE_NAMESIZE];
	int ret;
	uint64_t i;

	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
	if (umem == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate umem info");
		return NULL;
	}

	snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
		 internals->if_name, rxq->xsk_queue_idx);
	umem->buf_ring = rte_ring_create(ring_name,
					 ETH_AF_XDP_NUM_BUFFERS,
					 rte_socket_id(),
					 RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (umem->buf_ring == NULL) {
		AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
		goto err;
	}

	for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
		rte_ring_enqueue(umem->buf_ring,
				 (void *)(i * ETH_AF_XDP_FRAME_SIZE +
					  ETH_AF_XDP_DATA_HEADROOM));

	snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
		 internals->if_name, rxq->xsk_queue_idx);
	mz = rte_memzone_reserve_aligned(mz_name,
			ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
			getpagesize());
	if (mz == NULL) {
		AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
		goto err;
	}

	ret = xsk_umem__create(&umem->umem, mz->addr,
			       ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			       &umem->fq, &umem->cq,
			       &usr_config);
	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create umem");
		goto err;
	}
	umem->mz = mz;

	return umem;

err:
	xdp_umem_destroy(umem);
	return NULL;
}
#endif
static int
xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
	      int ring_size)
{
	struct xsk_socket_config cfg;
	struct pkt_tx_queue *txq = rxq->pair;
	int ret = 0;
	int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS / 2;
	struct rte_mbuf *fq_bufs[reserve_size];

	rxq->umem = xdp_umem_configure(internals, rxq);
	if (rxq->umem == NULL)
		return -ENOMEM;
	txq->umem = rxq->umem;

	cfg.rx_size = ring_size;
	cfg.tx_size = ring_size;
	cfg.libbpf_flags = 0;
	cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
	cfg.bind_flags = 0;

#if defined(XDP_USE_NEED_WAKEUP)
	cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
#endif

	ret = xsk_socket__create(&rxq->xsk, internals->if_name,
			rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
			&txq->tx, &cfg);
	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
		goto err;
	}

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) {
		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
		goto err;
	}
#endif
	ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs);
	if (ret) {
		xsk_socket__delete(rxq->xsk);
		AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
		goto err;
	}

	return 0;

err:
	xdp_umem_destroy(rxq->umem);

	return ret;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *rxq;
	int ret;

	rxq = &internals->rx_queues[rx_queue_id];

	AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
		   rx_queue_id, rxq->xsk_queue_idx);

#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
	uint32_t buf_size, data_size;

	/* Now get the space available for data in the mbuf */
	buf_size = rte_pktmbuf_data_room_size(mb_pool) -
		RTE_PKTMBUF_HEADROOM;
	data_size = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;

	if (data_size > buf_size) {
		AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
			dev->device->name, data_size, buf_size);
		ret = -ENOMEM;
		goto err;
	}
#endif

	rxq->mb_pool = mb_pool;

	if (xsk_configure(internals, rxq, nb_rx_desc)) {
		AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
		ret = -EINVAL;
		goto err;
	}

	rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
	rxq->fds[0].events = POLLIN;

	dev->data->rx_queues[rx_queue_id] = rxq;
	return 0;

err:
	return ret;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_tx_queue *txq;

	txq = &internals->tx_queues[tx_queue_id];

	dev->data->tx_queues[tx_queue_id] = txq;
	return 0;
}
static int
eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int ret;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -EINVAL;

	strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
	ret = ioctl(s, SIOCSIFMTU, &ifr);

	close(s);

	return (ret < 0) ? -errno : 0;
}
static int
eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
{
	struct ifreq ifr;
	int ret = 0;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -errno;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
		ret = -errno;
		goto out;
	}
	ifr.ifr_flags &= mask;
	ifr.ifr_flags |= flags;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
		ret = -errno;
		goto out;
	}
out:
	close(s);
	return ret;
}
static int
eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
}

static int
eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.mtu_set = eth_dev_mtu_set,
	.promiscuous_enable = eth_dev_promiscuous_enable,
	.promiscuous_disable = eth_dev_promiscuous_disable,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
/** parse integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	int *i = (int *)extra_args;
	char *end;

	*i = strtol(value, &end, 10);
	if (*i < 0) {
		AF_XDP_LOG(ERR, "Argument has to be positive.\n");
		return -EINVAL;
	}

	return 0;
}
/** parse name argument */
static int
parse_name_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	char *name = extra_args;

	if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
		AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
			   value, IFNAMSIZ);
		return -EINVAL;
	}

	strlcpy(name, value, IFNAMSIZ);

	return 0;
}
static int
xdp_get_channels_info(const char *if_name, int *max_queues,
		      int *combined_queues)
{
	struct ethtool_channels channels;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	channels.cmd = ETHTOOL_GCHANNELS;
	ifr.ifr_data = (void *)&channels;
	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret) {
		if (errno == EOPNOTSUPP) {
			ret = 0;
		} else {
			ret = -errno;
			goto out;
		}
	}

	if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		*max_queues = 1;
		*combined_queues = 1;
	} else {
		*max_queues = channels.max_combined;
		*combined_queues = channels.combined_count;
	}

out:
	close(fd);
	return ret;
}
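/* The ETHTOOL_GCHANNELS ioctl above reads the same data that the command
 * "ethtool -l <iface>" prints, so the combined channel count this PMD checks
 * against can be inspected (and raised, via "ethtool -L <iface> combined N")
 * from the shell before the vdev is probed.
 */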
static int
parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
		 int *queue_cnt)
{
	int ret;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
				 &parse_name_arg, if_name);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
				 &parse_integer_arg, start_queue);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
				 &parse_integer_arg, queue_cnt);
	if (ret < 0 || *queue_cnt <= 0) {
		ret = -EINVAL;
		goto free_kvlist;
	}

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
get_iface_info(const char *if_name,
	       struct rte_ether_addr *eth_addr,
	       int *if_index)
{
	struct ifreq ifr;
	int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);

	if (sock < 0)
		return -1;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(sock, SIOCGIFINDEX, &ifr))
		goto error;

	*if_index = ifr.ifr_ifindex;

	if (ioctl(sock, SIOCGIFHWADDR, &ifr))
		goto error;

	rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);

	close(sock);
	return 0;

error:
	close(sock);
	return -1;
}
static struct rte_eth_dev *
init_internals(struct rte_vdev_device *dev, const char *if_name,
	       int start_queue_idx, int queue_cnt)
{
	const char *name = rte_vdev_device_name(dev);
	const unsigned int numa_node = dev->device.numa_node;
	struct pmd_internals *internals;
	struct rte_eth_dev *eth_dev;
	int ret;
	int i;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		return NULL;

	internals->start_queue_idx = start_queue_idx;
	internals->queue_cnt = queue_cnt;
	strlcpy(internals->if_name, if_name, IFNAMSIZ);

	if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
				  &internals->combined_queue_cnt)) {
		AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
				if_name);
		goto err_free_internals;
	}

	if (queue_cnt > internals->combined_queue_cnt) {
		AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
				queue_cnt, internals->combined_queue_cnt);
		goto err_free_internals;
	}

	internals->rx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_rx_queue) * queue_cnt,
					0, numa_node);
	if (internals->rx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
		goto err_free_internals;
	}

	internals->tx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_tx_queue) * queue_cnt,
					0, numa_node);
	if (internals->tx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
		goto err_free_rx;
	}
	for (i = 0; i < queue_cnt; i++) {
		internals->tx_queues[i].pair = &internals->rx_queues[i];
		internals->rx_queues[i].pair = &internals->tx_queues[i];
		internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
		internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
	}

	ret = get_iface_info(if_name, &internals->eth_addr,
			     &internals->if_index);
	if (ret)
		goto err_free_tx;

	eth_dev = rte_eth_vdev_allocate(dev, 0);
	if (eth_dev == NULL)
		goto err_free_tx;

	eth_dev->data->dev_private = internals;
	eth_dev->data->dev_link = pmd_link;
	eth_dev->data->mac_addrs = &internals->eth_addr;
	eth_dev->dev_ops = &ops;
	eth_dev->rx_pkt_burst = eth_af_xdp_rx;
	eth_dev->tx_pkt_burst = eth_af_xdp_tx;
	/* Let rte_eth_dev_close() release the port resources. */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
#endif

	return eth_dev;

err_free_tx:
	rte_free(internals->tx_queues);
err_free_rx:
	rte_free(internals->rx_queues);
err_free_internals:
	rte_free(internals);
	return NULL;
}
static int
rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist;
	char if_name[IFNAMSIZ] = {'\0'};
	int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
	int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
	struct rte_eth_dev *eth_dev = NULL;
	const char *name;

	AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
		rte_vdev_device_name(dev));

	name = rte_vdev_device_name(dev);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
		strlen(rte_vdev_device_args(dev)) == 0) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
			return -EINVAL;
		}
		eth_dev->dev_ops = &ops;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL) {
		AF_XDP_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	/* parse_parameters() frees kvlist on both success and failure */
	if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
			     &xsk_queue_cnt) < 0) {
		AF_XDP_LOG(ERR, "Invalid kvargs value\n");
		return -EINVAL;
	}

	if (strlen(if_name) == 0) {
		AF_XDP_LOG(ERR, "Network interface must be specified\n");
		return -EINVAL;
	}

	eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
				 xsk_queue_cnt);
	if (eth_dev == NULL) {
		AF_XDP_LOG(ERR, "Failed to init internals\n");
		return -1;
	}

	rte_eth_dev_probing_finish(eth_dev);

	return 0;
}
static int
rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
		rte_socket_id());

	if (dev == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_af_xdp_drv = {
	.probe = rte_pmd_af_xdp_probe,
	.remove = rte_pmd_af_xdp_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
			      "iface=<string> "
			      "start_queue=<int> "
			      "queue_count=<int> ");
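/* Typical usage (interface name and core list are examples only):
 *
 *	testpmd -l 0-1 --vdev=net_af_xdp0,iface=ens786f1,start_queue=0,queue_count=1
 *
 * start_queue/queue_count select which of the netdev's channels the xsk
 * sockets bind to; queue_count must not exceed the combined channel count
 * reported by xdp_get_channels_info().
 */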
RTE_INIT(af_xdp_init_log)
{
	af_xdp_logtype = rte_log_register("pmd.net.af_xdp");
	if (af_xdp_logtype >= 0)
		rte_log_set_level(af_xdp_logtype, RTE_LOG_NOTICE);
}