/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <poll.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_xdp.h>
#include <linux/if_link.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include "af_xdp_deps.h"

#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

static int af_xdp_logtype;

#define AF_XDP_LOG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, af_xdp_logtype,	\
		"%s(): " fmt, __func__, ##args)
#define ETH_AF_XDP_FRAME_SIZE		2048
#define ETH_AF_XDP_NUM_BUFFERS		4096
#ifdef XDP_UMEM_UNALIGNED_CHUNK_FLAG
#define ETH_AF_XDP_MBUF_OVERHEAD	128 /* sizeof(struct rte_mbuf) */
#define ETH_AF_XDP_DATA_HEADROOM \
	(ETH_AF_XDP_MBUF_OVERHEAD + RTE_PKTMBUF_HEADROOM)
#else
#define ETH_AF_XDP_DATA_HEADROOM	0
#endif
#define ETH_AF_XDP_DFLT_NUM_DESCS	XSK_RING_CONS__DEFAULT_NUM_DESCS
#define ETH_AF_XDP_DFLT_START_QUEUE_IDX	0
#define ETH_AF_XDP_DFLT_QUEUE_COUNT	1

#define ETH_AF_XDP_RX_BATCH_SIZE	32
#define ETH_AF_XDP_TX_BATCH_SIZE	32
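
/*
 * The PMD is built in one of two modes depending on the kernel headers
 * available at compile time. If XDP_UMEM_UNALIGNED_CHUNK_FLAG exists, the
 * umem is registered directly on top of the rx mempool and frames move
 * between kernel and application without a copy ("zero-copy" mode,
 * functions suffixed _zc). Otherwise a dedicated umem memzone is used and
 * every frame is copied to or from an mbuf ("copy" mode, functions
 * suffixed _cp).
 */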
struct xsk_umem_info {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem;
	struct rte_ring *buf_ring;
	const struct rte_memzone *mz;
	struct rte_mempool *mb_pool;
	void *buffer;
};

struct rx_stats {
	uint64_t rx_pkts;
	uint64_t rx_bytes;
	uint64_t rx_dropped;
};

struct pkt_rx_queue {
	struct xsk_ring_cons rx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	struct rte_mempool *mb_pool;

	struct rx_stats stats;

	struct pkt_tx_queue *pair;
	struct pollfd fds[1];
	int xsk_queue_idx;
};

struct tx_stats {
	uint64_t tx_pkts;
	uint64_t tx_bytes;
	uint64_t tx_dropped;
};

struct pkt_tx_queue {
	struct xsk_ring_prod tx;
	struct xsk_umem_info *umem;

	struct tx_stats stats;

	struct pkt_rx_queue *pair;
	int xsk_queue_idx;
};

struct pmd_internals {
	int if_index;
	char if_name[IFNAMSIZ];
	int start_queue_idx;
	int queue_cnt;
	int max_queue_cnt;
	int combined_queue_cnt;

	struct rte_ether_addr eth_addr;

	struct pkt_rx_queue *rx_queues;
	struct pkt_tx_queue *tx_queues;
};
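
/*
 * Note: only one umem backing scheme is compiled in at a time. In
 * zero-copy mode the umem is backed by mb_pool/buffer; in copy mode it is
 * backed by the mz memzone, with buf_ring tracking which frames are free.
 */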
#define ETH_AF_XDP_IFACE_ARG			"iface"
#define ETH_AF_XDP_START_QUEUE_ARG		"start_queue"
#define ETH_AF_XDP_QUEUE_COUNT_ARG		"queue_count"

static const char * const valid_arguments[] = {
	ETH_AF_XDP_IFACE_ARG,
	ETH_AF_XDP_START_QUEUE_ARG,
	ETH_AF_XDP_QUEUE_COUNT_ARG,
	NULL
};

static const struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG
};
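
/*
 * Fill queue (fq) replenishment: descriptors placed on the fq hand umem
 * frames to the kernel for future rx. Addresses written to the fq are
 * offsets from the start of the umem, and only become visible to the
 * kernel once xsk_ring_prod__submit() is called.
 */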
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline int
reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
		      struct rte_mbuf **bufs)
{
	struct xsk_ring_prod *fq = &umem->fq;
	uint32_t idx;
	uint16_t i;

	if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
		for (i = 0; i < reserve_size; i++)
			rte_pktmbuf_free(bufs[i]);
		AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
		return -1;
	}

	for (i = 0; i < reserve_size; i++) {
		__u64 *fq_addr;
		uint64_t addr;

		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
		addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer;
		*fq_addr = addr;
	}

	xsk_ring_prod__submit(fq, reserve_size);

	return 0;
}
#else
static inline int
reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
		      struct rte_mbuf **bufs __rte_unused)
{
	struct xsk_ring_prod *fq = &umem->fq;
	void *addrs[reserve_size];
	uint32_t idx;
	uint16_t i;

	if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
		    != reserve_size) {
		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
		return -1;
	}

	if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
		AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
		rte_ring_enqueue_bulk(umem->buf_ring, addrs,
				      reserve_size, NULL);
		return -1;
	}

	for (i = 0; i < reserve_size; i++) {
		__u64 *fq_addr;

		fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
		*fq_addr = (uint64_t)addrs[i];
	}

	xsk_ring_prod__submit(fq, reserve_size);

	return 0;
}
#endif
static inline int
reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
		   struct rte_mbuf **bufs)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return reserve_fill_queue_zc(umem, reserve_size, bufs);
#else
	return reserve_fill_queue_cp(umem, reserve_size, bufs);
#endif
}
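
/*
 * Rx burst paths. Both variants peek up to nb_pkts descriptors from the
 * rx ring, convert them to mbufs, release the descriptors and replenish
 * the fill queue. The poll() call is only a wakeup mechanism: with
 * XDP_USE_NEED_WAKEUP the kernel sets a flag when it needs a syscall to
 * resume processing, so the driver only enters the kernel when asked to.
 */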
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_rx_queue *rxq = queue;
	struct xsk_ring_cons *rx = &rxq->rx;
	struct xsk_umem_info *umem = rxq->umem;
	uint32_t idx_rx = 0;
	unsigned long rx_bytes = 0;
	int rcvd, i;
	struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];

	/* allocate bufs for fill queue replenishment after rx */
	if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
		AF_XDP_LOG(DEBUG,
			"Failed to get enough buffers for fq.\n");
		return 0;
	}

	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);

	if (rcvd == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
		if (xsk_ring_prod__needs_wakeup(&umem->fq))
			(void)poll(rxq->fds, 1, 1000);
#endif

		goto out;
	}

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc;
		uint64_t addr;
		uint32_t len;
		uint64_t offset;

		desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		addr = desc->addr;
		len = desc->len;

		offset = xsk_umem__extract_offset(addr);
		addr = xsk_umem__extract_addr(addr);

		bufs[i] = (struct rte_mbuf *)
				xsk_umem__get_data(umem->buffer, addr);
		bufs[i]->data_off = offset - sizeof(struct rte_mbuf);

		rte_pktmbuf_pkt_len(bufs[i]) = len;
		rte_pktmbuf_data_len(bufs[i]) = len;
		rx_bytes += len;
	}

	xsk_ring_cons__release(rx, rcvd);

	(void)reserve_fill_queue(umem, rcvd, fq_bufs);

	/* statistics */
	rxq->stats.rx_pkts += rcvd;
	rxq->stats.rx_bytes += rx_bytes;

out:
	if (rcvd != nb_pkts)
		rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
				     nb_pkts - rcvd);

	return rcvd;
}
#else
static uint16_t
af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_rx_queue *rxq = queue;
	struct xsk_ring_cons *rx = &rxq->rx;
	struct xsk_umem_info *umem = rxq->umem;
	struct xsk_ring_prod *fq = &umem->fq;
	uint32_t idx_rx = 0;
	unsigned long rx_bytes = 0;
	int rcvd, i;
	uint32_t free_thresh = fq->size >> 1;
	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];

	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
		return 0;

	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
	if (rcvd == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
		if (xsk_ring_prod__needs_wakeup(fq))
			(void)poll(rxq->fds, 1, 1000);
#endif

		goto out;
	}

	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc;
		uint64_t addr;
		uint32_t len;
		void *pkt;

		desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		addr = desc->addr;
		len = desc->len;
		pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);

		rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
		rte_ring_enqueue(umem->buf_ring, (void *)addr);
		rte_pktmbuf_pkt_len(mbufs[i]) = len;
		rte_pktmbuf_data_len(mbufs[i]) = len;
		rx_bytes += len;
		bufs[i] = mbufs[i];
	}

	xsk_ring_cons__release(rx, rcvd);

	/* statistics */
	rxq->stats.rx_pkts += rcvd;
	rxq->stats.rx_bytes += rx_bytes;

out:
	if (rcvd != nb_pkts)
		rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
				     nb_pkts - rcvd);

	return rcvd;
}
#endif
static uint16_t
eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return af_xdp_rx_zc(queue, bufs, nb_pkts);
#else
	return af_xdp_rx_cp(queue, bufs, nb_pkts);
#endif
}
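
/*
 * The completion queue (cq) returns umem frames whose transmission has
 * finished. Draining it is what makes frames reusable: they are freed
 * back to the mempool in zero-copy mode, or pushed back onto buf_ring in
 * copy mode.
 */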
static void
pull_umem_cq(struct xsk_umem_info *umem, int size)
{
	struct xsk_ring_cons *cq = &umem->cq;
	size_t i, n;
	uint32_t idx_cq = 0;

	n = xsk_ring_cons__peek(cq, size, &idx_cq);

	for (i = 0; i < n; i++) {
		uint64_t addr;

		addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		addr = xsk_umem__extract_addr(addr);
		rte_pktmbuf_free((struct rte_mbuf *)
					xsk_umem__get_data(umem->buffer, addr));
#else
		rte_ring_enqueue(umem->buf_ring, (void *)addr);
#endif
	}

	xsk_ring_cons__release(cq, n);
}
static void
kick_tx(struct pkt_tx_queue *txq)
{
	struct xsk_umem_info *umem = txq->umem;

#if defined(XDP_USE_NEED_WAKEUP)
	if (xsk_ring_prod__needs_wakeup(&txq->tx))
#endif
		while (send(xsk_socket__fd(txq->pair->xsk), NULL,
			    0, MSG_DONTWAIT) < 0) {
			/* something unexpected */
			if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
				break;

			/* pull from completion queue to leave more space */
			if (errno == EAGAIN)
				pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE);
		}
#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
	pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE);
#endif
}
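
/*
 * Tx burst paths. In zero-copy mode an mbuf that already lives in the
 * umem mempool is transmitted in place; any other mbuf is first copied
 * into a freshly allocated umem mbuf. In copy mode every packet is copied
 * into a free umem frame taken from buf_ring.
 */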
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_tx_queue *txq = queue;
	struct xsk_umem_info *umem = txq->umem;
	struct rte_mbuf *mbuf;
	unsigned long tx_bytes = 0;
	int i;
	uint32_t idx_tx;
	uint16_t count = 0;
	struct xdp_desc *desc;
	uint64_t addr, offset;

	pull_umem_cq(umem, nb_pkts);

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];

		if (mbuf->pool == umem->mb_pool) {
			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
				kick_tx(txq);
				goto out;
			}
			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
			desc->len = mbuf->pkt_len;
			addr = (uint64_t)mbuf - (uint64_t)umem->buffer;
			offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
					(uint64_t)mbuf;
			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
			desc->addr = addr | offset;
			count++;
		} else {
			struct rte_mbuf *local_mbuf =
					rte_pktmbuf_alloc(umem->mb_pool);
			void *pkt;

			if (local_mbuf == NULL)
				goto out;

			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
				rte_pktmbuf_free(local_mbuf);
				kick_tx(txq);
				goto out;
			}

			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
			desc->len = mbuf->pkt_len;

			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer;
			offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
					(uint64_t)local_mbuf;
			pkt = xsk_umem__get_data(umem->buffer, addr + offset);
			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
			desc->addr = addr | offset;
			rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
					desc->len);
			rte_pktmbuf_free(mbuf);
			count++;
		}

		/*
		 * Account via desc->len: in the copy branch above the
		 * original mbuf has already been freed, so mbuf->pkt_len
		 * must not be read here.
		 */
		tx_bytes += desc->len;
	}

out:
	/* publish the descriptors before kicking the kernel */
	xsk_ring_prod__submit(&txq->tx, count);

#if defined(XDP_USE_NEED_WAKEUP)
	if (xsk_ring_prod__needs_wakeup(&txq->tx))
#endif
		kick_tx(txq);

	txq->stats.tx_pkts += count;
	txq->stats.tx_bytes += tx_bytes;
	txq->stats.tx_dropped += nb_pkts - count;

	return nb_pkts;
}
#else
static uint16_t
af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_tx_queue *txq = queue;
	struct xsk_umem_info *umem = txq->umem;
	struct rte_mbuf *mbuf;
	void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
	unsigned long tx_bytes = 0;
	int i;
	uint32_t idx_tx;

	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);

	pull_umem_cq(umem, nb_pkts);

	nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
					nb_pkts, NULL);
	if (nb_pkts == 0)
		return 0;

	if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
		kick_tx(txq);
		rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
		return 0;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct xdp_desc *desc;
		void *pkt;

		desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
		mbuf = bufs[i];
		desc->len = mbuf->pkt_len;

		desc->addr = (uint64_t)addrs[i];
		pkt = xsk_umem__get_data(umem->mz->addr,
					 desc->addr);
		rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	xsk_ring_prod__submit(&txq->tx, nb_pkts);

	kick_tx(txq);

	txq->stats.tx_pkts += nb_pkts;
	txq->stats.tx_bytes += tx_bytes;

	return nb_pkts;
}
#endif
static uint16_t
eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	return af_xdp_tx_zc(queue, bufs, nb_pkts);
#else
	return af_xdp_tx_cp(queue, bufs, nb_pkts);
#endif
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

/* This function gets called when the current port gets stopped. */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	/* rx/tx must be paired */
	if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
		return -EINVAL;

	return 0;
}
static int
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = ETH_FRAME_LEN;
	dev_info->max_rx_queues = internals->queue_cnt;
	dev_info->max_tx_queues = internals->queue_cnt;

	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;

	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
	dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;

	return 0;
}
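
/*
 * Per-queue counters are maintained in software by the rx/tx burst
 * functions; imissed is additionally fed from the kernel's XDP_STATISTICS
 * socket option, which counts drops that happen before packets ever reach
 * the rx ring (e.g. when the fill queue runs empty).
 */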
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct xdp_statistics xdp_stats;
	struct pkt_rx_queue *rxq;
	struct pkt_tx_queue *txq;
	socklen_t optlen;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		optlen = sizeof(struct xdp_statistics);
		rxq = &internals->rx_queues[i];
		txq = rxq->pair;
		stats->q_ipackets[i] = rxq->stats.rx_pkts;
		stats->q_ibytes[i] = rxq->stats.rx_bytes;

		stats->q_opackets[i] = txq->stats.tx_pkts;
		stats->q_obytes[i] = txq->stats.tx_bytes;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];
		stats->imissed += rxq->stats.rx_dropped;
		stats->oerrors += txq->stats.tx_dropped;
		ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
				XDP_STATISTICS, &xdp_stats, &optlen);
		if (ret != 0) {
			AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
			return -1;
		}
		stats->imissed += xdp_stats.rx_dropped;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
	}

	return 0;
}
static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < internals->queue_cnt; i++) {
		memset(&internals->rx_queues[i].stats, 0,
		       sizeof(struct rx_stats));
		memset(&internals->tx_queues[i].stats, 0,
		       sizeof(struct tx_stats));
	}

	return 0;
}
static void
remove_xdp_program(struct pmd_internals *internals)
{
	uint32_t curr_prog_id = 0;

	if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
				XDP_FLAGS_UPDATE_IF_NOEXIST)) {
		AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
		return;
	}
	bpf_set_link_xdp_fd(internals->if_index, -1,
			XDP_FLAGS_UPDATE_IF_NOEXIST);
}
static void
xdp_umem_destroy(struct xsk_umem_info *umem)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	rte_mempool_free(umem->mb_pool);
	umem->mb_pool = NULL;
#else
	rte_memzone_free(umem->mz);
	umem->mz = NULL;

	rte_ring_free(umem->buf_ring);
	umem->buf_ring = NULL;
#endif

	rte_free(umem);
	umem = NULL;
}
static void
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *rxq;
	int i;

	AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
		rte_socket_id());

	for (i = 0; i < internals->queue_cnt; i++) {
		rxq = &internals->rx_queues[i];
		if (rxq->umem == NULL)
			break;
		xsk_socket__delete(rxq->xsk);
		(void)xsk_umem__delete(rxq->umem->umem);
		xdp_umem_destroy(rxq->umem);
	}

	/*
	 * free pkt_tx_queue: the rx/tx queue structs are allocated as whole
	 * arrays in init_internals(), so they are released as arrays rather
	 * than per queue.
	 */
	rte_free(internals->tx_queues);
	rte_free(internals->rx_queues);

	/*
	 * MAC is not allocated dynamically; set it to NULL so that
	 * rte_eth_dev_release_port() does not try to free it.
	 */
	dev->data->mac_addrs = NULL;

	remove_xdp_program(internals);
}
static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
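
/*
 * Umem creation. The zero-copy variant registers the rx mempool's own
 * memory with the kernel; get_base_addr() only inspects the first memhdr
 * in the pool's mem_list, so this relies on the mempool being populated
 * from a single contiguous area. The copy-mode variant below instead
 * reserves a dedicated IOVA-contiguous memzone sliced into fixed-size
 * frames.
 */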
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static inline uint64_t get_base_addr(struct rte_mempool *mp)
{
	struct rte_mempool_memhdr *memhdr;

	memhdr = STAILQ_FIRST(&mp->mem_list);
	return (uint64_t)memhdr->addr & ~(getpagesize() - 1);
}

static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals __rte_unused,
				  struct pkt_rx_queue *rxq)
{
	struct xsk_umem_info *umem;
	int ret;
	struct xsk_umem_config usr_config = {
		.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
	void *base_addr = NULL;
	struct rte_mempool *mb_pool = rxq->mb_pool;

	usr_config.frame_size = rte_pktmbuf_data_room_size(mb_pool) +
					ETH_AF_XDP_MBUF_OVERHEAD +
					mb_pool->private_data_size;
	usr_config.frame_headroom = ETH_AF_XDP_DATA_HEADROOM +
					mb_pool->private_data_size;

	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
	if (umem == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate umem info");
		return NULL;
	}

	umem->mb_pool = mb_pool;
	base_addr = (void *)get_base_addr(mb_pool);

	ret = xsk_umem__create(&umem->umem, base_addr,
			       mb_pool->populated_size * usr_config.frame_size,
			       &umem->fq, &umem->cq,
			       &usr_config);

	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create umem");
		goto err;
	}
	umem->buffer = base_addr;
#else
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
				  struct pkt_rx_queue *rxq)
{
	struct xsk_umem_info *umem;
	const struct rte_memzone *mz;
	struct xsk_umem_config usr_config = {
		.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.frame_size = ETH_AF_XDP_FRAME_SIZE,
		.frame_headroom = ETH_AF_XDP_DATA_HEADROOM };
	char ring_name[RTE_RING_NAMESIZE];
	char mz_name[RTE_MEMZONE_NAMESIZE];
	int ret;
	uint64_t i;

	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
	if (umem == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate umem info");
		return NULL;
	}

	snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
		 internals->if_name, rxq->xsk_queue_idx);
	umem->buf_ring = rte_ring_create(ring_name,
					 ETH_AF_XDP_NUM_BUFFERS,
					 rte_socket_id(),
					 0x0);
	if (umem->buf_ring == NULL) {
		AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
		goto err;
	}

	for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
		rte_ring_enqueue(umem->buf_ring,
				 (void *)(i * ETH_AF_XDP_FRAME_SIZE +
					  ETH_AF_XDP_DATA_HEADROOM));

	snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
		 internals->if_name, rxq->xsk_queue_idx);
	mz = rte_memzone_reserve_aligned(mz_name,
			ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
			getpagesize());
	if (mz == NULL) {
		AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
		goto err;
	}

	ret = xsk_umem__create(&umem->umem, mz->addr,
			       ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			       &umem->fq, &umem->cq,
			       &usr_config);

	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create umem");
		goto err;
	}
	umem->mz = mz;

#endif
	return umem;

err:
	xdp_umem_destroy(umem);
	return NULL;
}
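
/*
 * Create and bind the AF_XDP socket for one queue pair. With
 * libbpf_flags == 0, libbpf loads its default XDP redirect program on the
 * interface unless one is already attached (XDP_FLAGS_UPDATE_IF_NOEXIST).
 * The fill queue is pre-populated with half the default descriptor count
 * so the kernel can deliver packets as soon as the socket is up.
 */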
static int
xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
	      int ring_size)
{
	struct xsk_socket_config cfg;
	struct pkt_tx_queue *txq = rxq->pair;
	int ret = 0;
	int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS / 2;
	struct rte_mbuf *fq_bufs[reserve_size];

	rxq->umem = xdp_umem_configure(internals, rxq);
	if (rxq->umem == NULL)
		return -ENOMEM;
	txq->umem = rxq->umem;

	cfg.rx_size = ring_size;
	cfg.tx_size = ring_size;
	cfg.libbpf_flags = 0;
	cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
	cfg.bind_flags = 0;

#if defined(XDP_USE_NEED_WAKEUP)
	cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
#endif

	ret = xsk_socket__create(&rxq->xsk, internals->if_name,
			rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
			&txq->tx, &cfg);
	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
		goto err;
	}

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) {
		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
		ret = -ENOMEM;
		xsk_socket__delete(rxq->xsk);
		goto err;
	}
#endif
	ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs);
	if (ret) {
		xsk_socket__delete(rxq->xsk);
		AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
		goto err;
	}

	return 0;

err:
	xdp_umem_destroy(rxq->umem);

	return ret;
}
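
/*
 * In copy mode a received frame is memcpy'd into an mbuf from the
 * configured pool, so the largest possible umem payload must fit in the
 * mbuf data room; the buf_size/data_size check below enforces this.
 * Zero-copy mode derives the umem frame size from the mempool itself, so
 * no such check is needed there.
 */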
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *rxq;
	int ret;

	rxq = &internals->rx_queues[rx_queue_id];

	AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
		   rx_queue_id, rxq->xsk_queue_idx);

#ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
	uint32_t buf_size, data_size;

	/* Now get the space available for data in the mbuf */
	buf_size = rte_pktmbuf_data_room_size(mb_pool) -
		RTE_PKTMBUF_HEADROOM;
	data_size = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;

	if (data_size > buf_size) {
		AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
			dev->device->name, data_size, buf_size);
		ret = -ENOMEM;
		goto err;
	}
#endif

	rxq->mb_pool = mb_pool;

	if (xsk_configure(internals, rxq, nb_rx_desc)) {
		AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
		ret = -EINVAL;
		goto err;
	}

	rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
	rxq->fds[0].events = POLLIN;

	dev->data->rx_queues[rx_queue_id] = rxq;
	return 0;

err:
	return ret;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_tx_queue *txq;

	txq = &internals->tx_queues[tx_queue_id];

	dev->data->tx_queues[tx_queue_id] = txq;
	return 0;
}
static int
eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int ret;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -EINVAL;

	strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
	ret = ioctl(s, SIOCSIFMTU, &ifr);
	close(s);

	return (ret < 0) ? -errno : 0;
}
static int
eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
{
	struct ifreq ifr;
	int ret = 0;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -errno;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
		ret = -errno;
		goto out;
	}
	ifr.ifr_flags &= mask;
	ifr.ifr_flags |= flags;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
		ret = -errno;
		goto out;
	}
out:
	close(s);
	return ret;
}

static int
eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
}

static int
eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.mtu_set = eth_dev_mtu_set,
	.promiscuous_enable = eth_dev_promiscuous_enable,
	.promiscuous_disable = eth_dev_promiscuous_disable,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
/** parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	int *i = (int *)extra_args;
	char *end;

	*i = strtol(value, &end, 10);
	if (*i < 0) {
		AF_XDP_LOG(ERR, "Argument has to be non-negative.\n");
		return -EINVAL;
	}

	return 0;
}

/** parse name argument */
static int
parse_name_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	char *name = extra_args;

	if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
		AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
			   value, IFNAMSIZ);
		return -EINVAL;
	}

	strlcpy(name, value, IFNAMSIZ);

	return 0;
}
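
/*
 * AF_XDP sockets bind to individual netdev queues, so the number of
 * combined channels reported by ethtool bounds how many queue pairs this
 * vdev can expose.
 */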
static int
xdp_get_channels_info(const char *if_name, int *max_queues,
		      int *combined_queues)
{
	struct ethtool_channels channels;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	channels.cmd = ETHTOOL_GCHANNELS;
	ifr.ifr_data = (void *)&channels;
	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret) {
		if (errno == EOPNOTSUPP) {
			ret = 0;
		} else {
			ret = -errno;
			goto out;
		}
	}

	if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		*max_queues = 1;
		*combined_queues = 1;
	} else {
		*max_queues = channels.max_combined;
		*combined_queues = channels.combined_count;
	}

out:
	close(fd);
	return ret;
}
static int
parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
		 int *queue_cnt)
{
	int ret;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
				 &parse_name_arg, if_name);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
				 &parse_integer_arg, start_queue);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
				 &parse_integer_arg, queue_cnt);
	if (ret < 0 || *queue_cnt <= 0) {
		ret = -EINVAL;
		goto free_kvlist;
	}

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
get_iface_info(const char *if_name,
	       struct rte_ether_addr *eth_addr,
	       int *if_index)
{
	struct ifreq ifr;
	int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);

	if (sock < 0)
		return -1;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(sock, SIOCGIFINDEX, &ifr))
		goto error;

	*if_index = ifr.ifr_ifindex;

	if (ioctl(sock, SIOCGIFHWADDR, &ifr))
		goto error;

	rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);

	close(sock);
	return 0;

error:
	close(sock);
	return -1;
}
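
/*
 * Allocate per-device state and the rx/tx queue arrays, pair each rx
 * queue with its tx sibling (a pair shares one xsk socket and one umem),
 * and register the ethdev. The sockets themselves are created later, in
 * eth_rx_queue_setup().
 */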
static struct rte_eth_dev *
init_internals(struct rte_vdev_device *dev, const char *if_name,
	       int start_queue_idx, int queue_cnt)
{
	const char *name = rte_vdev_device_name(dev);
	const unsigned int numa_node = dev->device.numa_node;
	struct pmd_internals *internals;
	struct rte_eth_dev *eth_dev;
	int ret;
	int i;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		return NULL;

	internals->start_queue_idx = start_queue_idx;
	internals->queue_cnt = queue_cnt;
	strlcpy(internals->if_name, if_name, IFNAMSIZ);

	if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
				  &internals->combined_queue_cnt)) {
		AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
			   if_name);
		goto err_free_internals;
	}

	if (queue_cnt > internals->combined_queue_cnt) {
		AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
			   queue_cnt, internals->combined_queue_cnt);
		goto err_free_internals;
	}

	internals->rx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_rx_queue) * queue_cnt,
					0, numa_node);
	if (internals->rx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
		goto err_free_internals;
	}

	internals->tx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_tx_queue) * queue_cnt,
					0, numa_node);
	if (internals->tx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
		goto err_free_rx;
	}
	for (i = 0; i < queue_cnt; i++) {
		internals->tx_queues[i].pair = &internals->rx_queues[i];
		internals->rx_queues[i].pair = &internals->tx_queues[i];
		internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
		internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
	}

	ret = get_iface_info(if_name, &internals->eth_addr,
			     &internals->if_index);
	if (ret)
		goto err_free_tx;

	eth_dev = rte_eth_vdev_allocate(dev, 0);
	if (eth_dev == NULL)
		goto err_free_tx;

	eth_dev->data->dev_private = internals;
	eth_dev->data->dev_link = pmd_link;
	eth_dev->data->mac_addrs = &internals->eth_addr;
	eth_dev->dev_ops = &ops;
	eth_dev->rx_pkt_burst = eth_af_xdp_rx;
	eth_dev->tx_pkt_burst = eth_af_xdp_tx;
	/* Let rte_eth_dev_close() release the port resources. */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
#endif

	return eth_dev;

err_free_tx:
	rte_free(internals->tx_queues);
err_free_rx:
	rte_free(internals->rx_queues);
err_free_internals:
	rte_free(internals);
	return NULL;
}
static int
rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist;
	char if_name[IFNAMSIZ] = {'\0'};
	int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
	int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
	struct rte_eth_dev *eth_dev = NULL;
	const char *name;

	AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
		rte_vdev_device_name(dev));

	name = rte_vdev_device_name(dev);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
		strlen(rte_vdev_device_args(dev)) == 0) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
			return -EINVAL;
		}
		eth_dev->dev_ops = &ops;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL) {
		AF_XDP_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
			     &xsk_queue_cnt) < 0) {
		AF_XDP_LOG(ERR, "Invalid kvargs value\n");
		return -EINVAL;
	}

	if (strlen(if_name) == 0) {
		AF_XDP_LOG(ERR, "Network interface must be specified\n");
		return -EINVAL;
	}

	eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
				 xsk_queue_cnt);
	if (eth_dev == NULL) {
		AF_XDP_LOG(ERR, "Failed to init internals\n");
		return -1;
	}

	rte_eth_dev_probing_finish(eth_dev);

	return 0;
}
static int
rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
		rte_socket_id());

	if (dev == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_af_xdp_drv = {
	.probe = rte_pmd_af_xdp_probe,
	.remove = rte_pmd_af_xdp_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
			      "iface=<string> "
			      "start_queue=<int> "
			      "queue_count=<int> ");
RTE_INIT(af_xdp_init_log)
{
	af_xdp_logtype = rte_log_register("pmd.net.af_xdp");
	if (af_xdp_logtype >= 0)
		rte_log_set_level(af_xdp_logtype, RTE_LOG_NOTICE);
}