/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_xdp.h>
#include <linux/if_link.h>
#include "af_xdp_deps.h"
#include <bpf/xsk.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ring.h>
static int af_xdp_logtype;

#define AF_XDP_LOG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, af_xdp_logtype,	\
		"%s(): " fmt, __func__, ##args)
#define ETH_AF_XDP_FRAME_SIZE		XSK_UMEM__DEFAULT_FRAME_SIZE
#define ETH_AF_XDP_NUM_BUFFERS		4096
#define ETH_AF_XDP_DATA_HEADROOM	0
#define ETH_AF_XDP_DFLT_NUM_DESCS	XSK_RING_CONS__DEFAULT_NUM_DESCS
#define ETH_AF_XDP_DFLT_QUEUE_IDX	0

#define ETH_AF_XDP_RX_BATCH_SIZE	32
#define ETH_AF_XDP_TX_BATCH_SIZE	32

#define ETH_AF_XDP_MAX_QUEUE_PAIRS	16
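
/*
 * Buffer management in a nutshell: the umem is one IOVA-contiguous memzone
 * carved into ETH_AF_XDP_NUM_BUFFERS frames of ETH_AF_XDP_FRAME_SIZE bytes.
 * Free frame addresses (offsets into that memzone) are cached in an rte_ring
 * and shuttled between the ring and the kernel fill/completion queues.
 */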
struct xsk_umem_info {
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem;
	struct rte_ring *buf_ring;
	const struct rte_memzone *mz;
	int pmd_zc;
};
struct rx_stats {
	uint64_t rx_pkts;
	uint64_t rx_bytes;
	uint64_t rx_dropped;
};

struct pkt_rx_queue {
	struct xsk_ring_cons rx;
	struct xsk_umem_info *umem;
	struct xsk_socket *xsk;
	struct rte_mempool *mb_pool;

	struct rx_stats stats;

	struct pkt_tx_queue *pair;
	uint16_t queue_idx;
};
struct tx_stats {
	uint64_t tx_pkts;
	uint64_t err_pkts;
	uint64_t tx_bytes;
};

struct pkt_tx_queue {
	struct xsk_ring_prod tx;

	struct tx_stats stats;

	struct pkt_rx_queue *pair;
	uint16_t queue_idx;
};
struct pmd_internals {
	int if_index;
	char if_name[IFNAMSIZ];
	uint16_t queue_idx;
	int pmd_zc;
	struct rte_ether_addr eth_addr;
	struct rte_mempool *mb_pool_share;

	struct pkt_rx_queue rx_queues[ETH_AF_XDP_MAX_QUEUE_PAIRS];
	struct pkt_tx_queue tx_queues[ETH_AF_XDP_MAX_QUEUE_PAIRS];
};
#define ETH_AF_XDP_IFACE_ARG		"iface"
#define ETH_AF_XDP_QUEUE_IDX_ARG	"queue"
#define ETH_AF_XDP_PMD_ZC_ARG		"pmd_zero_copy"

static const char * const valid_arguments[] = {
	ETH_AF_XDP_IFACE_ARG,
	ETH_AF_XDP_QUEUE_IDX_ARG,
	ETH_AF_XDP_PMD_ZC_ARG,
	NULL
};
static const struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG
};
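
/*
 * Move reserve_size free frame addresses from the buf_ring onto the kernel
 * fill queue so received packets can be written into them. Fails (returning
 * -1) if either the ring or the fill queue cannot supply reserve_size
 * entries at once.
 */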
static inline int
reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size)
{
	struct xsk_ring_prod *fq = &umem->fq;
	void *addrs[reserve_size];
	uint32_t idx;
	uint16_t i;

	if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
	    != reserve_size) {
		AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
		return -1;
	}
	if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
		AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
		rte_ring_enqueue_bulk(umem->buf_ring, addrs, reserve_size, NULL);
		return -1;
	}
	for (i = 0; i < reserve_size; i++) {
		uint64_t *fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
		*fq_addr = (uint64_t)addrs[i];
	}
	xsk_ring_prod__submit(fq, reserve_size);
	return 0;
}
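
/*
 * Free callback for mbufs carrying external (zero-copy) buffers: translate
 * the buffer's virtual address back into a umem offset and return the frame
 * to the buf_ring for reuse.
 */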
static void
umem_buf_release_to_fq(void *addr, void *opaque)
{
	struct xsk_umem_info *umem = (struct xsk_umem_info *)opaque;
	uint64_t umem_addr = (uint64_t)addr - umem->mz->addr_64;

	rte_ring_enqueue(umem->buf_ring, (void *)umem_addr);
}
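
/*
 * RX burst. With pmd_zero_copy the umem frame is attached to the mbuf as an
 * external buffer and only returns to the fill queue once the mbuf is freed;
 * otherwise the frame is copied into the mbuf's own data room and recycled
 * immediately.
 */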
static uint16_t
eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_rx_queue *rxq = queue;
	struct xsk_ring_cons *rx = &rxq->rx;
	struct xsk_umem_info *umem = rxq->umem;
	struct xsk_ring_prod *fq = &umem->fq;
	uint32_t idx_rx = 0;
	uint32_t free_thresh = fq->size >> 1;
	int pmd_zc = umem->pmd_zc;
	struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
	unsigned long dropped = 0;
	unsigned long rx_bytes = 0;
	int rcvd, i;

	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);

	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
		return 0;

	rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
	if (rcvd == 0)
		goto out;

	/* replenish the fill queue once it drops below half capacity */
	if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
		(void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE);

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc;
		uint64_t addr;
		uint32_t len;
		void *pkt;
		uint16_t buf_len = ETH_AF_XDP_FRAME_SIZE;
		struct rte_mbuf_ext_shared_info *shinfo;

		desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		addr = desc->addr;
		len = desc->len;
		pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);

		if (pmd_zc) {
			shinfo = rte_pktmbuf_ext_shinfo_init_helper(pkt,
					&buf_len, umem_buf_release_to_fq, umem);
			rte_pktmbuf_attach_extbuf(mbufs[i], pkt, 0, buf_len,
						  shinfo);
		} else {
			rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *),
				   pkt, len);
			rte_ring_enqueue(umem->buf_ring, (void *)addr);
		}
		rte_pktmbuf_pkt_len(mbufs[i]) = len;
		rte_pktmbuf_data_len(mbufs[i]) = len;
		rx_bytes += len;
		bufs[i] = mbufs[i];
	}

	xsk_ring_cons__release(rx, rcvd);

	/* statistics */
	rxq->stats.rx_pkts += (rcvd - dropped);
	rxq->stats.rx_bytes += rx_bytes;

out:
	if (rcvd != nb_pkts)
		rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
				     nb_pkts - rcvd);

	return rcvd;
}
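
/*
 * Drain up to size entries from the umem completion queue, returning the
 * transmitted frame addresses to the buf_ring.
 */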
static inline void
pull_umem_cq(struct xsk_umem_info *umem, int size)
{
	struct xsk_ring_cons *cq = &umem->cq;
	size_t i, n;
	uint32_t idx_cq = 0;

	n = xsk_ring_cons__peek(cq, size, &idx_cq);

	for (i = 0; i < n; i++) {
		uint64_t addr;
		addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
		rte_ring_enqueue(umem->buf_ring, (void *)addr);
	}

	xsk_ring_cons__release(cq, n);
}
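
/*
 * Nudge the kernel to start transmitting with a zero-length send() and keep
 * recycling completed frames while it reports transient errors.
 */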
static void
kick_tx(struct pkt_tx_queue *txq)
{
	struct xsk_umem_info *umem = txq->pair->umem;

	while (send(xsk_socket__fd(txq->pair->xsk), NULL,
		    0, MSG_DONTWAIT) < 0) {
		/* something unexpected */
		if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
			break;

		/* pull from completion queue to leave more space */
		if (errno == EAGAIN)
			pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE);
	}
	pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE);
}
static inline bool
in_umem_range(struct xsk_umem_info *umem, uint64_t addr)
{
	uint64_t mz_base_addr = umem->mz->addr_64;

	return addr >= mz_base_addr && addr < mz_base_addr + umem->mz->len;
}
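
/*
 * TX burst. The completion-queue pull plus buf_ring dequeue bound the burst
 * to the number of free umem frames. The zero-copy path is taken only when
 * the mbuf's external buffer already lives inside this port's umem;
 * everything else is copied into a fresh frame.
 */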
static uint16_t
eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct pkt_tx_queue *txq = queue;
	struct xsk_umem_info *umem = txq->pair->umem;
	struct rte_mbuf *mbuf;
	int pmd_zc = umem->pmd_zc;
	void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
	unsigned long tx_bytes = 0;
	int i;
	uint32_t idx_tx;

	nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);

	pull_umem_cq(umem, nb_pkts);

	nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
					nb_pkts, NULL);
	if (nb_pkts == 0)
		return 0;

	if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
		kick_tx(txq);
		rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
		return 0;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct xdp_desc *desc;
		void *pkt;

		desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
		mbuf = bufs[i];
		desc->len = mbuf->pkt_len;

		/*
		 * Make sure the external mbuf address is within the
		 * current port's umem memzone range.
		 */
		if (pmd_zc && RTE_MBUF_HAS_EXTBUF(mbuf) &&
		    in_umem_range(umem, (uint64_t)mbuf->buf_addr)) {
			desc->addr = (uint64_t)mbuf->buf_addr -
					umem->mz->addr_64;
			mbuf->buf_addr = xsk_umem__get_data(umem->mz->addr,
					(uint64_t)addrs[i]);
		} else {
			desc->addr = (uint64_t)addrs[i];
			pkt = xsk_umem__get_data(umem->mz->addr,
					desc->addr);
			rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
				   desc->len);
		}
		tx_bytes += mbuf->pkt_len;
	}

	xsk_ring_prod__submit(&txq->tx, nb_pkts);

	kick_tx(txq);

	txq->stats.tx_pkts += nb_pkts;
	txq->stats.tx_bytes += tx_bytes;

	for (i = 0; i < nb_pkts; i++)
		rte_pktmbuf_free(bufs[i]);

	return nb_pkts;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}
/* This function gets called when the current port gets stopped. */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	/* rx/tx must be paired */
	if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
		return -EINVAL;

	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = ETH_FRAME_LEN;
	dev_info->max_rx_queues = 1;
	dev_info->max_tx_queues = 1;

	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;

	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
	dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct xdp_statistics xdp_stats;
	struct pkt_rx_queue *rxq;
	socklen_t optlen;
	int i, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		optlen = sizeof(struct xdp_statistics);
		rxq = &internals->rx_queues[i];
		stats->q_ipackets[i] = internals->rx_queues[i].stats.rx_pkts;
		stats->q_ibytes[i] = internals->rx_queues[i].stats.rx_bytes;

		stats->q_opackets[i] = internals->tx_queues[i].stats.tx_pkts;
		stats->q_obytes[i] = internals->tx_queues[i].stats.tx_bytes;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];
		stats->imissed += internals->rx_queues[i].stats.rx_dropped;
		ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
				XDP_STATISTICS, &xdp_stats, &optlen);
		if (ret != 0) {
			AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
			return -1;
		}
		stats->imissed += xdp_stats.rx_dropped;

		stats->opackets += stats->q_opackets[i];
		stats->oerrors += internals->tx_queues[i].stats.err_pkts;
		stats->obytes += stats->q_obytes[i];
	}

	return 0;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int i;

	for (i = 0; i < ETH_AF_XDP_MAX_QUEUE_PAIRS; i++) {
		memset(&internals->rx_queues[i].stats, 0,
					sizeof(struct rx_stats));
		memset(&internals->tx_queues[i].stats, 0,
					sizeof(struct tx_stats));
	}
}
static void
remove_xdp_program(struct pmd_internals *internals)
{
	uint32_t curr_prog_id = 0;

	if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
				XDP_FLAGS_UPDATE_IF_NOEXIST)) {
		AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
		return;
	}
	bpf_set_link_xdp_fd(internals->if_index, -1,
			XDP_FLAGS_UPDATE_IF_NOEXIST);
}
static void
xdp_umem_destroy(struct xsk_umem_info *umem)
{
	rte_memzone_free(umem->mz);
	umem->mz = NULL;

	rte_ring_free(umem->buf_ring);
	umem->buf_ring = NULL;

	rte_free(umem);
}
static void
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *rxq;
	int i;

	AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
		rte_socket_id());

	for (i = 0; i < ETH_AF_XDP_MAX_QUEUE_PAIRS; i++) {
		rxq = &internals->rx_queues[i];
		if (rxq->umem == NULL)
			break;
		xsk_socket__delete(rxq->xsk);
		(void)xsk_umem__delete(rxq->umem->umem);
		xdp_umem_destroy(rxq->umem);
	}

	/*
	 * MAC is not allocated dynamically; setting it to NULL prevents
	 * rte_eth_dev_release_port() from trying to free it.
	 */
	dev->data->mac_addrs = NULL;

	remove_xdp_program(internals);
}
static void
eth_queue_release(void *q __rte_unused)
{
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
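
/*
 * Allocate the umem backing store for one queue pair: the buf_ring of free
 * frame addresses, the IOVA-contiguous memzone holding the frames, and the
 * kernel umem object with its fill and completion rings.
 */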
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals)
{
	struct xsk_umem_info *umem;
	const struct rte_memzone *mz;
	struct xsk_umem_config usr_config = {
		.fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
		.frame_size = ETH_AF_XDP_FRAME_SIZE,
		.frame_headroom = ETH_AF_XDP_DATA_HEADROOM };
	char ring_name[RTE_RING_NAMESIZE];
	char mz_name[RTE_MEMZONE_NAMESIZE];
	int ret;
	uint64_t i;

	umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
	if (umem == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
		return NULL;
	}

	snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
		 internals->if_name, internals->queue_idx);
	umem->buf_ring = rte_ring_create(ring_name,
					 ETH_AF_XDP_NUM_BUFFERS,
					 rte_socket_id(),
					 0x0);
	if (umem->buf_ring == NULL) {
		AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
		goto err;
	}

	for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
		rte_ring_enqueue(umem->buf_ring,
				 (void *)(i * ETH_AF_XDP_FRAME_SIZE +
					  ETH_AF_XDP_DATA_HEADROOM));

	snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
		 internals->if_name, internals->queue_idx);
	mz = rte_memzone_reserve_aligned(mz_name,
			ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
			getpagesize());
	if (mz == NULL) {
		AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
		goto err;
	}

	ret = xsk_umem__create(&umem->umem, mz->addr,
			       ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
			       &umem->fq, &umem->cq,
			       &usr_config);
	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create umem\n");
		goto err;
	}
	umem->mz = mz;

	return umem;

err:
	xdp_umem_destroy(umem);
	return NULL;
}
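
/*
 * Create the AF_XDP socket for a queue pair on top of a freshly configured
 * umem and pre-populate half of the fill queue.
 */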
static int
xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
	      int ring_size)
{
	struct xsk_socket_config cfg;
	struct pkt_tx_queue *txq = rxq->pair;
	int ret = 0;
	int reserve_size;

	rxq->umem = xdp_umem_configure(internals);
	if (rxq->umem == NULL)
		return -ENOMEM;

	cfg.rx_size = ring_size;
	cfg.tx_size = ring_size;
	cfg.libbpf_flags = 0;
	cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
	cfg.bind_flags = 0;
	ret = xsk_socket__create(&rxq->xsk, internals->if_name,
			internals->queue_idx, rxq->umem->umem, &rxq->rx,
			&txq->tx, &cfg);
	if (ret) {
		AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
		goto err;
	}

	reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS / 2;
	ret = reserve_fill_queue(rxq->umem, reserve_size);
	if (ret) {
		xsk_socket__delete(rxq->xsk);
		AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
		goto err;
	}

	return 0;

err:
	xdp_umem_destroy(rxq->umem);

	return ret;
}
static void
queue_reset(struct pmd_internals *internals, uint16_t queue_idx)
{
	struct pkt_rx_queue *rxq = &internals->rx_queues[queue_idx];
	struct pkt_tx_queue *txq = rxq->pair;

	memset(rxq, 0, sizeof(*rxq));
	memset(txq, 0, sizeof(*txq));
	rxq->pair = txq;
	txq->pair = rxq;
	rxq->queue_idx = queue_idx;
	txq->queue_idx = queue_idx;
}
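
/*
 * RX queue setup. A full umem frame must fit in the mempool's mbuf data
 * room, since received frames are either attached to or copied into these
 * mbufs.
 */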
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	uint32_t buf_size, data_size;
	struct pkt_rx_queue *rxq;
	int ret;

	rxq = &internals->rx_queues[rx_queue_id];
	queue_reset(internals, rx_queue_id);

	/* Now get the space available for data in the mbuf */
	buf_size = rte_pktmbuf_data_room_size(mb_pool) -
		RTE_PKTMBUF_HEADROOM;
	data_size = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;

	if (data_size > buf_size) {
		AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
			dev->device->name, data_size, buf_size);
		ret = -ENOMEM;
		goto err;
	}

	rxq->mb_pool = mb_pool;

	if (xsk_configure(internals, rxq, nb_rx_desc)) {
		AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
		ret = -EINVAL;
		goto err;
	}

	rxq->umem->pmd_zc = internals->pmd_zc;

	dev->data->rx_queues[rx_queue_id] = rxq;
	return 0;

err:
	queue_reset(internals, rx_queue_id);
	return ret;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_tx_queue *txq;

	txq = &internals->tx_queues[tx_queue_id];

	dev->data->tx_queues[tx_queue_id] = txq;
	return 0;
}
static int
eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int ret;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -EINVAL;

	strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
	ret = ioctl(s, SIOCSIFMTU, &ifr);
	close(s);

	return (ret < 0) ? -errno : 0;
}
static void
eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
{
	struct ifreq ifr;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0)
		goto out;
	ifr.ifr_flags &= mask;
	ifr.ifr_flags |= flags;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0)
		goto out;
out:
	close(s);
}
static void
eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
}

static void
eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.mtu_set = eth_dev_mtu_set,
	.promiscuous_enable = eth_dev_promiscuous_enable,
	.promiscuous_disable = eth_dev_promiscuous_disable,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
/** parse integer from the argument value */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	int *i = (int *)extra_args;
	char *end;

	*i = strtol(value, &end, 10);
	if (*i < 0) {
		AF_XDP_LOG(ERR, "Argument has to be positive.\n");
		return -EINVAL;
	}

	return 0;
}
/** parse name argument */
static int
parse_name_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	char *name = extra_args;

	if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
		AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
			   value, IFNAMSIZ);
		return -EINVAL;
	}

	strlcpy(name, value, IFNAMSIZ);

	return 0;
}
static int
parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *queue_idx,
		 int *pmd_zc)
{
	int ret;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
				 &parse_name_arg, if_name);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_IDX_ARG,
				 &parse_integer_arg, queue_idx);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PMD_ZC_ARG,
				 &parse_integer_arg, pmd_zc);

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
get_iface_info(const char *if_name,
	       struct rte_ether_addr *eth_addr,
	       int *if_index)
{
	struct ifreq ifr;
	int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);

	if (sock < 0)
		return -1;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(sock, SIOCGIFINDEX, &ifr))
		goto error;

	*if_index = ifr.ifr_ifindex;

	if (ioctl(sock, SIOCGIFHWADDR, &ifr))
		goto error;

	rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);

	close(sock);
	return 0;

error:
	close(sock);
	return -1;
}
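
/*
 * Allocate and wire up the per-port private data, query the kernel for the
 * interface index and MAC address, and register the ethdev.
 */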
static struct rte_eth_dev *
init_internals(struct rte_vdev_device *dev, const char *if_name, int queue_idx,
	       int pmd_zc)
{
	const char *name = rte_vdev_device_name(dev);
	const unsigned int numa_node = dev->device.numa_node;
	struct pmd_internals *internals;
	struct rte_eth_dev *eth_dev;
	int ret;
	int i;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		return NULL;

	internals->queue_idx = queue_idx;
	internals->pmd_zc = pmd_zc;
	strlcpy(internals->if_name, if_name, IFNAMSIZ);

	for (i = 0; i < ETH_AF_XDP_MAX_QUEUE_PAIRS; i++) {
		internals->tx_queues[i].pair = &internals->rx_queues[i];
		internals->rx_queues[i].pair = &internals->tx_queues[i];
	}

	ret = get_iface_info(if_name, &internals->eth_addr,
			     &internals->if_index);
	if (ret)
		goto err;

	eth_dev = rte_eth_vdev_allocate(dev, 0);
	if (eth_dev == NULL)
		goto err;

	eth_dev->data->dev_private = internals;
	eth_dev->data->dev_link = pmd_link;
	eth_dev->data->mac_addrs = &internals->eth_addr;
	eth_dev->dev_ops = &ops;
	eth_dev->rx_pkt_burst = eth_af_xdp_rx;
	eth_dev->tx_pkt_burst = eth_af_xdp_tx;
	/* Let rte_eth_dev_close() release the port resources. */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	if (internals->pmd_zc)
		AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");

	return eth_dev;

err:
	rte_free(internals);
	return NULL;
}
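
/*
 * Probe entry point for the vdev bus. A port is created with, for example
 * (the interface name below is illustrative only):
 *
 *   --vdev net_af_xdp,iface=eth0,queue=0,pmd_zero_copy=1
 */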
static int
rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist;
	char if_name[IFNAMSIZ] = {'\0'};
	int xsk_queue_idx = ETH_AF_XDP_DFLT_QUEUE_IDX;
	struct rte_eth_dev *eth_dev = NULL;
	const char *name;
	int pmd_zc = 0;

	AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
		rte_vdev_device_name(dev));

	name = rte_vdev_device_name(dev);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
		strlen(rte_vdev_device_args(dev)) == 0) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL) {
			AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
			return -EINVAL;
		}
		eth_dev->dev_ops = &ops;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL) {
		AF_XDP_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	if (parse_parameters(kvlist, if_name, &xsk_queue_idx, &pmd_zc) < 0) {
		AF_XDP_LOG(ERR, "Invalid kvargs value\n");
		return -EINVAL;
	}

	if (strlen(if_name) == 0) {
		AF_XDP_LOG(ERR, "Network interface must be specified\n");
		return -EINVAL;
	}

	eth_dev = init_internals(dev, if_name, xsk_queue_idx, pmd_zc);
	if (eth_dev == NULL) {
		AF_XDP_LOG(ERR, "Failed to init internals\n");
		return -1;
	}

	rte_eth_dev_probing_finish(eth_dev);

	return 0;
}
static int
rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
		rte_socket_id());

	if (dev == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_af_xdp_drv = {
	.probe = rte_pmd_af_xdp_probe,
	.remove = rte_pmd_af_xdp_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
			      "iface=<string> "
			      "queue=<int> "
			      "pmd_zero_copy=<0|1>");
RTE_INIT(af_xdp_init_log)
{
	af_xdp_logtype = rte_log_register("pmd.net.af_xdp");
	if (af_xdp_logtype >= 0)
		rte_log_set_level(af_xdp_logtype, RTE_LOG_NOTICE);
}