/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <poll.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_xdp.h>
#include <linux/if_link.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include "af_xdp_deps.h"
#include <bpf/bpf.h>

#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_ether.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ring.h>

static int af_xdp_logtype;

#define AF_XDP_LOG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, af_xdp_logtype,	\
		"%s(): " fmt, __func__, ##args)

#define ETH_AF_XDP_FRAME_SIZE		2048
#define ETH_AF_XDP_NUM_BUFFERS		4096
#ifdef XDP_UMEM_UNALIGNED_CHUNK_FLAG
#define ETH_AF_XDP_MBUF_OVERHEAD	128 /* sizeof(struct rte_mbuf) */
#define ETH_AF_XDP_DATA_HEADROOM \
	(ETH_AF_XDP_MBUF_OVERHEAD + RTE_PKTMBUF_HEADROOM)
#else
#define ETH_AF_XDP_DATA_HEADROOM	0
#endif
#define ETH_AF_XDP_DFLT_NUM_DESCS	XSK_RING_CONS__DEFAULT_NUM_DESCS
#define ETH_AF_XDP_DFLT_START_QUEUE_IDX	0
#define ETH_AF_XDP_DFLT_QUEUE_COUNT	1

#define ETH_AF_XDP_RX_BATCH_SIZE	32
#define ETH_AF_XDP_TX_BATCH_SIZE	32

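/*
 * Two buffer-management modes are compiled in below. When the kernel
 * headers provide XDP_UMEM_UNALIGNED_CHUNK_FLAG, the mbufs of the rx
 * mempool themselves back the umem and packets move to/from the kernel
 * without copying ("zero copy between umem and mbuf"). Otherwise a
 * dedicated memzone of ETH_AF_XDP_NUM_BUFFERS fixed 2 KB frames backs
 * the umem, free frame addresses are tracked in a rte_ring, and every
 * packet is copied between umem frames and mbufs.
 */
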
struct xsk_umem_info {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem;
	struct rte_ring *buf_ring;
	const struct rte_memzone *mz;
	struct rte_mempool *mb_pool;
	void *buffer;
};

struct rx_stats {
	uint64_t rx_pkts;
	uint64_t rx_bytes;
	uint64_t rx_dropped;
};

struct pkt_rx_queue {
	struct xsk_ring_cons rx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	struct rte_mempool *mb_pool;

	struct rx_stats stats;

	struct pkt_tx_queue *pair;
	struct pollfd fds[1];
	int xsk_queue_idx;
};

struct tx_stats {
	uint64_t tx_pkts;
	uint64_t tx_bytes;
	uint64_t tx_dropped;
};

struct pkt_tx_queue {
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;

	struct tx_stats stats;

	struct pkt_rx_queue *pair;
	int xsk_queue_idx;
};

struct pmd_internals {
	int if_index;
	char if_name[IFNAMSIZ];
	int start_queue_idx;
	int queue_cnt;
	int max_queue_cnt;
	int combined_queue_cnt;

	struct rte_ether_addr eth_addr;

	struct pkt_rx_queue *rx_queues;
	struct pkt_tx_queue *tx_queues;
};

#define ETH_AF_XDP_IFACE_ARG			"iface"
#define ETH_AF_XDP_START_QUEUE_ARG		"start_queue"
#define ETH_AF_XDP_QUEUE_COUNT_ARG		"queue_count"

static const char * const valid_arguments[] = {
	ETH_AF_XDP_IFACE_ARG,
	ETH_AF_XDP_START_QUEUE_ARG,
	ETH_AF_XDP_QUEUE_COUNT_ARG,
	NULL
};

static const struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG
};

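/*
 * reserve_fill_queue() posts rx buffers to the kernel via the fill
 * queue (fq). The zero-copy variant posts umem offsets derived from the
 * mbufs passed in; the copy variant ignores the mbufs and posts fixed
 * frame addresses dequeued from umem->buf_ring. Both back out cleanly
 * and return -1 if the fq descriptors cannot be reserved.
 */
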
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline int
reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
		      struct rte_mbuf **bufs)
{
	struct xsk_ring_prod *fq = &umem->fq;
	uint32_t idx;
	uint16_t i;

	if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
		for (i = 0; i < reserve_size; i++)
			rte_pktmbuf_free(bufs[i]);
		AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
		return -1;
	}

	for (i = 0; i < reserve_size; i++) {
		__u64 *fq_addr;
		uint64_t addr;

		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
		addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer;
		*fq_addr = addr;
	}

	xsk_ring_prod__submit(fq, reserve_size);

	return 0;
}
#else
static inline int
reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
		      struct rte_mbuf **bufs __rte_unused)
{
	struct xsk_ring_prod *fq = &umem->fq;
	void *addrs[reserve_size];
	uint32_t idx;
	uint16_t i;

	if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
		    != reserve_size) {
		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
		return -1;
	}

	if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
		AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
		rte_ring_enqueue_bulk(umem->buf_ring, addrs,
				      reserve_size, NULL);
		return -1;
	}

	for (i = 0; i < reserve_size; i++) {
		__u64 *fq_addr;

		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
		*fq_addr = (uint64_t)addrs[i];
	}

	xsk_ring_prod__submit(fq, reserve_size);

	return 0;
}
#endif

static inline int
reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
		   struct rte_mbuf **bufs)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return reserve_fill_queue_zc(umem, reserve_size, bufs);
#else
	return reserve_fill_queue_cp(umem, reserve_size, bufs);
#endif
}

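/*
 * Zero-copy rx: each completed descriptor carries a umem address with
 * the chunk offset encoded in its upper bits. The driver extracts the
 * offset and address, recovers the enclosing rte_mbuf (the chunk starts
 * with the mbuf header, hence the sizeof(struct rte_mbuf) adjustment to
 * data_off) and hands that mbuf straight up to the application.
 */
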
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_rx_queue *rxq = queue;
	struct xsk_ring_cons *rx = &rxq->rx;
	struct xsk_umem_info *umem = rxq->umem;
	uint32_t idx_rx = 0;
	unsigned long rx_bytes = 0;
	int rcvd, i;
	struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];

	/* allocate bufs for fill queue replenishment after rx */
	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
		AF_XDP_LOG(DEBUG,
			"Failed to get enough buffers for fq.\n");
		return 0;
	}

	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
	if (rcvd == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
		if (xsk_ring_prod__needs_wakeup(&umem->fq))
			(void)poll(rxq->fds, 1, 1000);
#endif

		goto out;
	}

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc;
		uint64_t addr;
		uint32_t len;
		uint64_t offset;

		desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		addr = desc->addr;
		len = desc->len;

		offset = xsk_umem__extract_offset(addr);
		addr = xsk_umem__extract_addr(addr);

		bufs[i] = (struct rte_mbuf *)
				xsk_umem__get_data(umem->buffer, addr);
		bufs[i]->data_off = offset - sizeof(struct rte_mbuf);

		rte_pktmbuf_pkt_len(bufs[i]) = len;
		rte_pktmbuf_data_len(bufs[i]) = len;
		rx_bytes += len;
	}

	xsk_ring_cons__release(rx, rcvd);

	(void)reserve_fill_queue(umem, rcvd, fq_bufs);

	/* statistics */
	rxq->stats.rx_pkts += rcvd;
	rxq->stats.rx_bytes += rx_bytes;

out:
	if (rcvd != nb_pkts)
		rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
				     nb_pkts - rcvd);

	return rcvd;
}

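/*
 * Copy-mode rx: packet data lives in the dedicated umem memzone, so each
 * received frame is copied into a freshly allocated mbuf and its umem
 * address is recycled through buf_ring. The fill queue is replenished
 * opportunistically once half of it (free_thresh) is consumed.
 */
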
#else
static uint16_t
af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_rx_queue *rxq = queue;
	struct xsk_ring_cons *rx = &rxq->rx;
	struct xsk_umem_info *umem = rxq->umem;
	struct xsk_ring_prod *fq = &umem->fq;
	uint32_t idx_rx = 0;
	unsigned long rx_bytes = 0;
	int rcvd, i;
	uint32_t free_thresh = fq->size >> 1;
	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];

	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
		return 0;

	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
	if (rcvd == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
		if (xsk_ring_prod__needs_wakeup(fq))
			(void)poll(rxq->fds, 1, 1000);
#endif

		goto out;
	}

	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc;
		uint64_t addr;
		uint32_t len;
		void *pkt;

		desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		addr = desc->addr;
		len = desc->len;
		pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);

		rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
		rte_ring_enqueue(umem->buf_ring, (void *)addr);
		rte_pktmbuf_pkt_len(mbufs[i]) = len;
		rte_pktmbuf_data_len(mbufs[i]) = len;
		rx_bytes += len;
		bufs[i] = mbufs[i];
	}

	xsk_ring_cons__release(rx, rcvd);

	/* statistics */
	rxq->stats.rx_pkts += rcvd;
	rxq->stats.rx_bytes += rx_bytes;

out:
	if (rcvd != nb_pkts)
		rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
				     nb_pkts - rcvd);

	return rcvd;
}
#endif

static uint16_t
eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return af_xdp_rx_zc(queue, bufs, nb_pkts);
#else
	return af_xdp_rx_cp(queue, bufs, nb_pkts);
#endif
}

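/*
 * The completion queue (cq) returns frame addresses whose transmission
 * has finished: in zero-copy mode the enclosing mbuf is freed back to
 * its mempool, in copy mode the address goes back onto buf_ring.
 */
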
static void
pull_umem_cq(struct xsk_umem_info *umem, int size)
{
	struct xsk_ring_cons *cq = &umem->cq;
	size_t i, n;
	uint32_t idx_cq = 0;

	n = xsk_ring_cons__peek(cq, size, &idx_cq);

	for (i = 0; i < n; i++) {
		uint64_t addr;

		addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		addr = xsk_umem__extract_addr(addr);
		rte_pktmbuf_free((struct rte_mbuf *)
					xsk_umem__get_data(umem->buffer, addr));
#else
		rte_ring_enqueue(umem->buf_ring, (void *)addr);
#endif
	}

	xsk_ring_cons__release(cq, n);
}

static void
kick_tx(struct pkt_tx_queue *txq)
{
	struct xsk_umem_info *umem = txq->umem;

#if defined(XDP_USE_NEED_WAKEUP)
	if (xsk_ring_prod__needs_wakeup(&txq->tx))
#endif
		while (send(xsk_socket__fd(txq->pair->xsk), NULL,
			    0, MSG_DONTWAIT) < 0) {
			/* something unexpected */
			if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
				break;

			/* pull from completion queue to leave more space */
			if (errno == EAGAIN)
				pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE);
		}
#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
	pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE);
#endif
}

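/*
 * Zero-copy tx: an mbuf allocated from the umem mempool can be placed on
 * the tx ring directly; its descriptor encodes the chunk base address in
 * the low bits and the data offset in the bits above
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT. Any other mbuf is first copied into a
 * freshly allocated umem mbuf, which then takes its place on the ring.
 */
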
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_tx_queue *txq = queue;
	struct xsk_umem_info *umem = txq->umem;
	struct rte_mbuf *mbuf;
	unsigned long tx_bytes = 0;
	int i;
	uint32_t idx_tx;
	uint16_t count = 0;
	struct xdp_desc *desc;
	uint64_t addr, offset;

	pull_umem_cq(umem, nb_pkts);

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];

		if (mbuf->pool == umem->mb_pool) {
			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
				kick_tx(txq);
				goto out;
			}
			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
			desc->len = mbuf->pkt_len;
			addr = (uint64_t)mbuf - (uint64_t)umem->buffer;
			offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
					(uint64_t)mbuf;
			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
			desc->addr = addr | offset;
			count++;
		} else {
			struct rte_mbuf *local_mbuf =
					rte_pktmbuf_alloc(umem->mb_pool);
			void *pkt;

			if (local_mbuf == NULL)
				goto out;

			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
				rte_pktmbuf_free(local_mbuf);
				kick_tx(txq);
				goto out;
			}

			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
			desc->len = mbuf->pkt_len;

			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer;
			offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
					(uint64_t)local_mbuf;
			pkt = xsk_umem__get_data(umem->buffer, addr + offset);
			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
			desc->addr = addr | offset;
			rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
					desc->len);
			rte_pktmbuf_free(mbuf);
			count++;
		}

		/* use desc->len: the copy path has already freed mbuf */
		tx_bytes += desc->len;
	}

	kick_tx(txq);

out:
	xsk_ring_prod__submit(&txq->tx, count);

	txq->stats.tx_pkts += count;
	txq->stats.tx_bytes += tx_bytes;
	txq->stats.tx_dropped += nb_pkts - count;

	return count;
}

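/*
 * Copy-mode tx: transmit slots are fixed umem frames, so the driver
 * dequeues that many free frame addresses from buf_ring, reserves the
 * same number of tx descriptors in one go, and copies each mbuf's data
 * into its frame before submitting the batch.
 */
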
#else
static uint16_t
af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_tx_queue *txq = queue;
	struct xsk_umem_info *umem = txq->umem;
	struct rte_mbuf *mbuf;
	void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
	unsigned long tx_bytes = 0;
	int i;
	uint32_t idx_tx;

	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);

	pull_umem_cq(umem, nb_pkts);

	nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
					nb_pkts, NULL);
	if (nb_pkts == 0)
		return 0;

	if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
		kick_tx(txq);
		rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
		return 0;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct xdp_desc *desc;
		void *pkt;

		desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
		mbuf = bufs[i];
		desc->len = mbuf->pkt_len;

		desc->addr = (uint64_t)addrs[i];
		pkt = xsk_umem__get_data(umem->mz->addr,
					 desc->addr);
		rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	xsk_ring_prod__submit(&txq->tx, nb_pkts);

	kick_tx(txq);

	txq->stats.tx_pkts += nb_pkts;
	txq->stats.tx_bytes += tx_bytes;

	return nb_pkts;
}
#endif

static uint16_t
eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return af_xdp_tx_zc(queue, bufs, nb_pkts);
#else
	return af_xdp_tx_cp(queue, bufs, nb_pkts);
#endif
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

/* This function gets called when the current port gets stopped. */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	/* rx/tx must be paired */
	if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
		return -EINVAL;

	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = ETH_FRAME_LEN;
	dev_info->max_rx_queues = internals->queue_cnt;
	dev_info->max_tx_queues = internals->queue_cnt;

	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;

	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
	dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;

	return 0;
}

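/*
 * Worked example for max_mtu above: with the default RTE_PKTMBUF_HEADROOM
 * of 128, the zero-copy build reports 2048 - (128 + 128) = 1792, while
 * the copy-mode build (headroom define of 0) reports the full 2048-byte
 * frame size.
 */
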
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct xdp_statistics xdp_stats;
	struct pkt_rx_queue *rxq;
	struct pkt_tx_queue *txq;
	socklen_t optlen;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		optlen = sizeof(struct xdp_statistics);
		rxq = &internals->rx_queues[i];
		txq = rxq->pair;
		stats->q_ipackets[i] = rxq->stats.rx_pkts;
		stats->q_ibytes[i] = rxq->stats.rx_bytes;

		stats->q_opackets[i] = txq->stats.tx_pkts;
		stats->q_obytes[i] = txq->stats.tx_bytes;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];
		stats->imissed += rxq->stats.rx_dropped;
		stats->oerrors += txq->stats.tx_dropped;
		ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
				XDP_STATISTICS, &xdp_stats, &optlen);
		if (ret != 0) {
			AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
			return -1;
		}
		stats->imissed += xdp_stats.rx_dropped;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
	}

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->queue_cnt; i++) {
		memset(&internals->rx_queues[i].stats, 0,
		       sizeof(struct rx_stats));
		memset(&internals->tx_queues[i].stats, 0,
		       sizeof(struct tx_stats));
	}

	return 0;
}

static void
remove_xdp_program(struct pmd_internals *internals)
{
	uint32_t curr_prog_id = 0;

	if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
				XDP_FLAGS_UPDATE_IF_NOEXIST)) {
		AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
		return;
	}
	bpf_set_link_xdp_fd(internals->if_index, -1,
			    XDP_FLAGS_UPDATE_IF_NOEXIST);
}

static void
xdp_umem_destroy(struct xsk_umem_info *umem)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	rte_mempool_free(umem->mb_pool);
	umem->mb_pool = NULL;
#else
	rte_memzone_free(umem->mz);
	umem->mz = NULL;

	rte_ring_free(umem->buf_ring);
	umem->buf_ring = NULL;
#endif

	rte_free(umem);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *rxq;
	int i;

	AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
		   rte_socket_id());

	for (i = 0; i < internals->queue_cnt; i++) {
		rxq = &internals->rx_queues[i];
		if (rxq->umem == NULL)
			break;
		xsk_socket__delete(rxq->xsk);
		(void)xsk_umem__delete(rxq->umem->umem);
		xdp_umem_destroy(rxq->umem);
	}

	/* free the pkt_rx_queue/pkt_tx_queue arrays as a whole */
	rte_free(internals->tx_queues);
	rte_free(internals->rx_queues);

	/*
	 * MAC is not allocated dynamically; set it to NULL to prevent
	 * rte_eth_dev_release_port() from attempting to free it.
	 */
	dev->data->mac_addrs = NULL;

	remove_xdp_program(internals);
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline uint64_t get_base_addr(struct rte_mempool *mp)
{
	struct rte_mempool_memhdr *memhdr;

	memhdr = STAILQ_FIRST(&mp->mem_list);
	return (uint64_t)memhdr->addr & ~(getpagesize() - 1);
}

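/*
 * Note: the umem below is registered over a single virtually contiguous
 * region starting at the page-aligned base of the mempool's first memory
 * chunk; only the first memhdr is consulted, so a mempool spread across
 * multiple non-contiguous chunks is an assumption this lookup does not
 * check for.
 */
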
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals __rte_unused,
				  struct pkt_rx_queue *rxq)
{
	struct xsk_umem_info *umem;
	int ret;
	struct xsk_umem_config usr_config = {
		.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
	void *base_addr = NULL;
	struct rte_mempool *mb_pool = rxq->mb_pool;

	usr_config.frame_size = rte_pktmbuf_data_room_size(mb_pool) +
					ETH_AF_XDP_MBUF_OVERHEAD +
					mb_pool->private_data_size;
	usr_config.frame_headroom = ETH_AF_XDP_DATA_HEADROOM +
					mb_pool->private_data_size;

	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
	if (umem == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate umem info");
		return NULL;
	}

	umem->mb_pool = mb_pool;
	base_addr = (void *)get_base_addr(mb_pool);

	ret = xsk_umem__create(&umem->umem, base_addr,
			       mb_pool->populated_size * usr_config.frame_size,
			       &umem->fq, &umem->cq,
			       &usr_config);

	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create umem");
		goto err;
	}
	umem->buffer = base_addr;

#else
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
				  struct pkt_rx_queue *rxq)
{
	struct xsk_umem_info *umem;
	const struct rte_memzone *mz;
	struct xsk_umem_config usr_config = {
		.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.frame_size = ETH_AF_XDP_FRAME_SIZE,
		.frame_headroom = ETH_AF_XDP_DATA_HEADROOM };
	char ring_name[RTE_RING_NAMESIZE];
	char mz_name[RTE_MEMZONE_NAMESIZE];
	int ret;
	uint64_t i;

	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
	if (umem == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate umem info");
		return NULL;
	}

	snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
		 internals->if_name, rxq->xsk_queue_idx);
	umem->buf_ring = rte_ring_create(ring_name,
					 ETH_AF_XDP_NUM_BUFFERS,
					 rte_socket_id(),
					 RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (umem->buf_ring == NULL) {
		AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
		goto err;
	}

	for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
		rte_ring_enqueue(umem->buf_ring,
				 (void *)(i * ETH_AF_XDP_FRAME_SIZE +
					  ETH_AF_XDP_DATA_HEADROOM));

	snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
		 internals->if_name, rxq->xsk_queue_idx);
	mz = rte_memzone_reserve_aligned(mz_name,
			ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
			getpagesize());
	if (mz == NULL) {
		AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
		goto err;
	}

	ret = xsk_umem__create(&umem->umem, mz->addr,
			       ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			       &umem->fq, &umem->cq,
			       &usr_config);

	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create umem");
		goto err;
	}
	umem->mz = mz;

#endif
	return umem;

err:
	xdp_umem_destroy(umem);
	return NULL;
}

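/*
 * Set up the AF_XDP socket for one queue pair: create the umem, create
 * the xsk bound to internals->if_name/xsk_queue_idx, then pre-post half
 * of the default descriptor count (ETH_AF_XDP_DFLT_NUM_DESCS / 2) to the
 * fill queue so the kernel has rx buffers from the start.
 */
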
static int
xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
	      int ring_size)
{
	struct xsk_socket_config cfg;
	struct pkt_tx_queue *txq = rxq->pair;
	int ret = 0;
	int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS / 2;
	struct rte_mbuf *fq_bufs[reserve_size];

	rxq->umem = xdp_umem_configure(internals, rxq);
	if (rxq->umem == NULL)
		return -ENOMEM;
	txq->umem = rxq->umem;

	cfg.rx_size = ring_size;
	cfg.tx_size = ring_size;
	cfg.libbpf_flags = 0;
	cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
	cfg.bind_flags = 0;

#if defined(XDP_USE_NEED_WAKEUP)
	cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
#endif

	ret = xsk_socket__create(&rxq->xsk, internals->if_name,
				 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
				 &txq->tx, &cfg);
	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
		goto err;
	}

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) {
		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
		ret = -ENOMEM;
		goto err;
	}
#endif
	ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs);
	if (ret) {
		xsk_socket__delete(rxq->xsk);
		AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
		goto err;
	}

	return 0;

err:
	xdp_umem_destroy(rxq->umem);

	return ret;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *rxq;
	int ret;

	rxq = &internals->rx_queues[rx_queue_id];

	AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
		   rx_queue_id, rxq->xsk_queue_idx);

#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
	uint32_t buf_size, data_size;

	/* Now get the space available for data in the mbuf */
	buf_size = rte_pktmbuf_data_room_size(mb_pool) -
		RTE_PKTMBUF_HEADROOM;
	data_size = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;

	if (data_size > buf_size) {
		AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
			   dev->device->name, data_size, buf_size);
		ret = -ENOMEM;
		goto err;
	}
#endif

	rxq->mb_pool = mb_pool;

	if (xsk_configure(internals, rxq, nb_rx_desc)) {
		AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
		ret = -EINVAL;
		goto err;
	}

	rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
	rxq->fds[0].events = POLLIN;

	dev->data->rx_queues[rx_queue_id] = rxq;
	return 0;

err:
	return ret;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_tx_queue *txq;

	txq = &internals->tx_queues[tx_queue_id];

	dev->data->tx_queues[tx_queue_id] = txq;
	return 0;
}

static int
eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int ret;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -EINVAL;

	strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
	ret = ioctl(s, SIOCSIFMTU, &ifr);

	close(s);

	return (ret < 0) ? -errno : 0;
}

static int
eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
{
	struct ifreq ifr;
	int ret = 0;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -errno;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
		ret = -errno;
		goto out;
	}
	ifr.ifr_flags &= mask;
	ifr.ifr_flags |= flags;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
		ret = -errno;
		goto out;
	}
out:
	close(s);
	return ret;
}

static int
eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
}

static int
eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.mtu_set = eth_dev_mtu_set,
	.promiscuous_enable = eth_dev_promiscuous_enable,
	.promiscuous_disable = eth_dev_promiscuous_disable,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

/** parse an integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	int *i = (int *)extra_args;
	char *end;

	*i = strtol(value, &end, 10);
	if (*i < 0) {
		AF_XDP_LOG(ERR, "Argument has to be positive.\n");
		return -EINVAL;
	}

	return 0;
}

1070 /** parse name argument */
1072 parse_name_arg(const char *key __rte_unused,
1073 const char *value, void *extra_args)
1075 char *name = extra_args;
1077 if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
1078 AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
1083 strlcpy(name, value, IFNAMSIZ);
static int
xdp_get_channels_info(const char *if_name, int *max_queues,
		      int *combined_queues)
{
	struct ethtool_channels channels;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	channels.cmd = ETHTOOL_GCHANNELS;
	ifr.ifr_data = (void *)&channels;
	strncpy(ifr.ifr_name, if_name, IFNAMSIZ);
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret) {
		if (errno == EOPNOTSUPP) {
			ret = 0;
		} else {
			ret = -errno;
			goto out;
		}
	}

	if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		*max_queues = 1;
		*combined_queues = 1;
	} else {
		*max_queues = channels.max_combined;
		*combined_queues = channels.combined_count;
	}

out:
	close(fd);
	return ret;
}

static int
parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
		 int *queue_cnt)
{
	int ret;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
				 &parse_name_arg, if_name);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
				 &parse_integer_arg, start_queue);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
				 &parse_integer_arg, queue_cnt);
	if (ret < 0 || *queue_cnt <= 0) {
		ret = -EINVAL;
		goto free_kvlist;
	}

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
get_iface_info(const char *if_name,
	       struct rte_ether_addr *eth_addr,
	       int *if_index)
{
	struct ifreq ifr;
	int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);

	if (sock < 0)
		return -1;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(sock, SIOCGIFINDEX, &ifr))
		goto error;

	*if_index = ifr.ifr_ifindex;

	if (ioctl(sock, SIOCGIFHWADDR, &ifr))
		goto error;

	rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);

	close(sock);
	return 0;

error:
	close(sock);
	return -1;
}

static struct rte_eth_dev *
init_internals(struct rte_vdev_device *dev, const char *if_name,
	       int start_queue_idx, int queue_cnt)
{
	const char *name = rte_vdev_device_name(dev);
	const unsigned int numa_node = dev->device.numa_node;
	struct pmd_internals *internals;
	struct rte_eth_dev *eth_dev;
	int ret;
	int i;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		return NULL;

	internals->start_queue_idx = start_queue_idx;
	internals->queue_cnt = queue_cnt;
	strlcpy(internals->if_name, if_name, IFNAMSIZ);

	if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
				  &internals->combined_queue_cnt)) {
		AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
			   if_name);
		goto err_free_internals;
	}

	if (queue_cnt > internals->combined_queue_cnt) {
		AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
			   queue_cnt, internals->combined_queue_cnt);
		goto err_free_internals;
	}

	internals->rx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_rx_queue) * queue_cnt,
					0, numa_node);
	if (internals->rx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
		goto err_free_internals;
	}

	internals->tx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_tx_queue) * queue_cnt,
					0, numa_node);
	if (internals->tx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
		goto err_free_rx;
	}

	for (i = 0; i < queue_cnt; i++) {
		internals->tx_queues[i].pair = &internals->rx_queues[i];
		internals->rx_queues[i].pair = &internals->tx_queues[i];
		internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
		internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
	}

	ret = get_iface_info(if_name, &internals->eth_addr,
			     &internals->if_index);
	if (ret)
		goto err_free_tx;

	eth_dev = rte_eth_vdev_allocate(dev, 0);
	if (eth_dev == NULL)
		goto err_free_tx;

	eth_dev->data->dev_private = internals;
	eth_dev->data->dev_link = pmd_link;
	eth_dev->data->mac_addrs = &internals->eth_addr;
	eth_dev->dev_ops = &ops;
	eth_dev->rx_pkt_burst = eth_af_xdp_rx;
	eth_dev->tx_pkt_burst = eth_af_xdp_tx;
	/* Let rte_eth_dev_close() release the port resources. */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
#endif

	return eth_dev;

err_free_tx:
	rte_free(internals->tx_queues);
err_free_rx:
	rte_free(internals->rx_queues);
err_free_internals:
	rte_free(internals);
	return NULL;
}

static int
rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist;
	char if_name[IFNAMSIZ] = {'\0'};
	int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
	int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
	struct rte_eth_dev *eth_dev = NULL;
	const char *name;

	AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
		   rte_vdev_device_name(dev));

	name = rte_vdev_device_name(dev);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
	    strlen(rte_vdev_device_args(dev)) == 0) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
			return -EINVAL;
		}
		eth_dev->dev_ops = &ops;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL) {
		AF_XDP_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
			     &xsk_queue_cnt) < 0) {
		AF_XDP_LOG(ERR, "Invalid kvargs value\n");
		return -EINVAL;
	}

	if (strlen(if_name) == 0) {
		AF_XDP_LOG(ERR, "Network interface must be specified\n");
		return -EINVAL;
	}

	eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
				 xsk_queue_cnt);
	if (eth_dev == NULL) {
		AF_XDP_LOG(ERR, "Failed to init internals\n");
		return -1;
	}

	rte_eth_dev_probing_finish(eth_dev);

	return 0;
}

static int
rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
		   rte_socket_id());

	if (dev == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_af_xdp_drv = {
	.probe = rte_pmd_af_xdp_probe,
	.remove = rte_pmd_af_xdp_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
			      "iface=<string> "
			      "start_queue=<int> "
			      "queue_count=<int> ");

RTE_INIT(af_xdp_init_log)
{
	af_xdp_logtype = rte_log_register("pmd.net.af_xdp");
	if (af_xdp_logtype >= 0)
		rte_log_set_level(af_xdp_logtype, RTE_LOG_NOTICE);
}