1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014 John W. Linville <linville@tuxdriver.com>
3 * Originally based upon librte_pmd_pcap code:
4 * Copyright(c) 2010-2015 Intel Corporation.
5 * Copyright(c) 2014 6WIND S.A.
9 #include <rte_string_fns.h>
11 #include <rte_ethdev_driver.h>
12 #include <rte_ethdev_vdev.h>
13 #include <rte_malloc.h>
14 #include <rte_kvargs.h>
15 #include <rte_bus_vdev.h>
18 #include <linux/if_ether.h>
19 #include <linux/if_packet.h>
20 #include <arpa/inet.h>
22 #include <sys/types.h>
23 #include <sys/socket.h>
24 #include <sys/ioctl.h>
30 #define ETH_AF_PACKET_IFACE_ARG "iface"
31 #define ETH_AF_PACKET_NUM_Q_ARG "qpairs"
32 #define ETH_AF_PACKET_BLOCKSIZE_ARG "blocksz"
33 #define ETH_AF_PACKET_FRAMESIZE_ARG "framesz"
34 #define ETH_AF_PACKET_FRAMECOUNT_ARG "framecnt"
35 #define ETH_AF_PACKET_QDISC_BYPASS_ARG "qdisc_bypass"
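/*
 * Illustrative usage (interface name and sizes are placeholder values): the
 * kvargs above are supplied when the vdev is created, e.g.
 *   --vdev=net_af_packet0,iface=eth0,qpairs=1,blocksz=4096,framesz=2048,framecnt=512,qdisc_bypass=0
 */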
37 #define DFLT_FRAME_SIZE (1 << 11)
38 #define DFLT_FRAME_COUNT (1 << 9)
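/* i.e. 2048-byte frames and 512 frames per ring by default */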
40 #define RTE_PMD_AF_PACKET_MAX_RINGS 16
47 unsigned int framecount;
48 unsigned int framenum;
50 struct rte_mempool *mb_pool;
53 volatile unsigned long rx_pkts;
54 volatile unsigned long rx_bytes;
59 unsigned int frame_data_size;
63 unsigned int framecount;
64 unsigned int framenum;
66 volatile unsigned long tx_pkts;
67 volatile unsigned long err_pkts;
68 volatile unsigned long tx_bytes;
71 struct pmd_internals {
76 struct rte_ether_addr eth_addr;
78 struct tpacket_req req;
80 struct pkt_rx_queue rx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
81 struct pkt_tx_queue tx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
84 static const char *valid_arguments[] = {
85 ETH_AF_PACKET_IFACE_ARG,
86 ETH_AF_PACKET_NUM_Q_ARG,
87 ETH_AF_PACKET_BLOCKSIZE_ARG,
88 ETH_AF_PACKET_FRAMESIZE_ARG,
89 ETH_AF_PACKET_FRAMECOUNT_ARG,
90 ETH_AF_PACKET_QDISC_BYPASS_ARG,
94 static struct rte_eth_link pmd_link = {
95 .link_speed = ETH_SPEED_NUM_10G,
96 .link_duplex = ETH_LINK_FULL_DUPLEX,
97 .link_status = ETH_LINK_DOWN,
98 .link_autoneg = ETH_LINK_FIXED,
101 static int af_packet_logtype;
103 #define PMD_LOG(level, fmt, args...) \
104 rte_log(RTE_LOG_ ## level, af_packet_logtype, \
105 "%s(): " fmt "\n", __func__, ##args)
107 #define PMD_LOG_ERRNO(level, fmt, args...) \
108 rte_log(RTE_LOG_ ## level, af_packet_logtype, \
109 "%s(): " fmt ":%s\n", __func__, ##args, strerror(errno))
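/*
 * For example: PMD_LOG(ERR, "%s: invalid value", name); PMD_LOG_ERRNO()
 * behaves the same but appends strerror(errno), as used below for socket
 * and ioctl failures.
 */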
112 eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
115 struct tpacket2_hdr *ppd;
116 struct rte_mbuf *mbuf;
118 struct pkt_rx_queue *pkt_q = queue;
120 unsigned long num_rx_bytes = 0;
121 unsigned int framecount, framenum;
123 if (unlikely(nb_pkts == 0))
127 * Reads up to the given number of packets from the AF_PACKET socket one
128 * by one and copies each packet's data into a newly allocated mbuf.
130 framecount = pkt_q->framecount;
131 framenum = pkt_q->framenum;
132 for (i = 0; i < nb_pkts; i++) {
133 /* point at the next incoming frame */
134 ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
135 if ((ppd->tp_status & TP_STATUS_USER) == 0)
138 /* allocate the next mbuf */
139 mbuf = rte_pktmbuf_alloc(pkt_q->mb_pool);
140 if (unlikely(mbuf == NULL))
143 /* packet will fit in the mbuf, go ahead and receive it */
144 rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) = ppd->tp_snaplen;
145 pbuf = (uint8_t *) ppd + ppd->tp_mac;
146 memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));
148 /* check for vlan info */
149 if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
150 mbuf->vlan_tci = ppd->tp_vlan_tci;
151 mbuf->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
154 /* release incoming frame and advance ring buffer */
155 ppd->tp_status = TP_STATUS_KERNEL;
156 if (++framenum >= framecount)
158 mbuf->port = pkt_q->in_port;
160 /* account for the receive frame */
163 num_rx_bytes += mbuf->pkt_len;
165 pkt_q->framenum = framenum;
166 pkt_q->rx_pkts += num_rx;
167 pkt_q->rx_bytes += num_rx_bytes;
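/*
 * RX follows the TPACKET_V2 ring handshake: the kernel marks a filled frame
 * TP_STATUS_USER, the PMD copies it into an mbuf and gives the frame back
 * by writing TP_STATUS_KERNEL before moving to the next slot.
 */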
172 * Callback to handle transmitting packets through the memory-mapped AF_PACKET socket.
175 eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
177 struct tpacket2_hdr *ppd;
178 struct rte_mbuf *mbuf;
180 unsigned int framecount, framenum;
182 struct pkt_tx_queue *pkt_q = queue;
184 unsigned long num_tx_bytes = 0;
187 if (unlikely(nb_pkts == 0))
190 memset(&pfd, 0, sizeof(pfd));
191 pfd.fd = pkt_q->sockfd;
192 pfd.events = POLLOUT;
195 framecount = pkt_q->framecount;
196 framenum = pkt_q->framenum;
197 ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
198 for (i = 0; i < nb_pkts; i++) {
201 /* drop oversized packets */
202 if (mbuf->pkt_len > pkt_q->frame_data_size) {
203 rte_pktmbuf_free(mbuf);
207 /* insert vlan info if necessary */
208 if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
209 if (rte_vlan_insert(&mbuf)) {
210 rte_pktmbuf_free(mbuf);
215 /* wait (via poll) until the current TX ring frame becomes available */
216 if ((ppd->tp_status != TP_STATUS_AVAILABLE) &&
217 (poll(&pfd, 1, -1) < 0))
220 /* copy the tx frame data */
221 pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN -
222 sizeof(struct sockaddr_ll);
224 struct rte_mbuf *tmp_mbuf = mbuf;
226 uint16_t data_len = rte_pktmbuf_data_len(tmp_mbuf);
227 memcpy(pbuf, rte_pktmbuf_mtod(tmp_mbuf, void*), data_len);
229 tmp_mbuf = tmp_mbuf->next;
232 ppd->tp_len = mbuf->pkt_len;
233 ppd->tp_snaplen = mbuf->pkt_len;
235 /* hand the frame to the kernel for transmission and advance the ring */
236 ppd->tp_status = TP_STATUS_SEND_REQUEST;
237 if (++framenum >= framecount)
239 ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
242 num_tx_bytes += mbuf->pkt_len;
243 rte_pktmbuf_free(mbuf);
246 /* kick-off transmits */
247 if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1 &&
248 errno != ENOBUFS && errno != EAGAIN) {
250 * In case of an ENOBUFS/EAGAIN error all of the enqueued
251 * packets will be considered successful even though only some
259 pkt_q->framenum = framenum;
260 pkt_q->tx_pkts += num_tx;
261 pkt_q->err_pkts += i - num_tx;
262 pkt_q->tx_bytes += num_tx_bytes;
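/*
 * TX mirrors the RX handshake: each frame is filled in place and marked
 * TP_STATUS_SEND_REQUEST, and the zero-length sendto() above asks the kernel
 * to transmit all frames so marked; the kernel hands a frame back to user
 * space (TP_STATUS_AVAILABLE) once it has consumed it.
 */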
267 eth_dev_start(struct rte_eth_dev *dev)
269 dev->data->dev_link.link_status = ETH_LINK_UP;
274 * This function gets called when the current port gets stopped.
277 eth_dev_stop(struct rte_eth_dev *dev)
281 struct pmd_internals *internals = dev->data->dev_private;
283 for (i = 0; i < internals->nb_queues; i++) {
284 sockfd = internals->rx_queue[i].sockfd;
288 /* Prevent use after free in case tx fd == rx fd */
289 if (sockfd != internals->tx_queue[i].sockfd) {
290 sockfd = internals->tx_queue[i].sockfd;
295 internals->rx_queue[i].sockfd = -1;
296 internals->tx_queue[i].sockfd = -1;
299 dev->data->dev_link.link_status = ETH_LINK_DOWN;
303 eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
309 eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
311 struct pmd_internals *internals = dev->data->dev_private;
313 dev_info->if_index = internals->if_index;
314 dev_info->max_mac_addrs = 1;
315 dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
316 dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
317 dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
318 dev_info->min_rx_bufsize = 0;
319 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
320 DEV_TX_OFFLOAD_VLAN_INSERT;
326 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
329 unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
330 unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
331 const struct pmd_internals *internal = dev->data->dev_private;
333 imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
334 internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
335 for (i = 0; i < imax; i++) {
336 igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
337 igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
338 rx_total += igb_stats->q_ipackets[i];
339 rx_bytes_total += igb_stats->q_ibytes[i];
342 imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
343 internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
344 for (i = 0; i < imax; i++) {
345 igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
346 igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
347 tx_total += igb_stats->q_opackets[i];
348 tx_err_total += internal->tx_queue[i].err_pkts;
349 tx_bytes_total += igb_stats->q_obytes[i];
352 igb_stats->ipackets = rx_total;
353 igb_stats->ibytes = rx_bytes_total;
354 igb_stats->opackets = tx_total;
355 igb_stats->oerrors = tx_err_total;
356 igb_stats->obytes = tx_bytes_total;
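/*
 * Note: the per-queue arrays in rte_eth_stats are fixed at
 * RTE_ETHDEV_QUEUE_STAT_CNTRS entries, hence the clamp on imax above; queues
 * beyond that limit are not accounted here.
 */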
361 eth_stats_reset(struct rte_eth_dev *dev)
364 struct pmd_internals *internal = dev->data->dev_private;
366 for (i = 0; i < internal->nb_queues; i++) {
367 internal->rx_queue[i].rx_pkts = 0;
368 internal->rx_queue[i].rx_bytes = 0;
371 for (i = 0; i < internal->nb_queues; i++) {
372 internal->tx_queue[i].tx_pkts = 0;
373 internal->tx_queue[i].err_pkts = 0;
374 internal->tx_queue[i].tx_bytes = 0;
381 eth_dev_close(struct rte_eth_dev *dev __rte_unused)
386 eth_queue_release(void *q __rte_unused)
391 eth_link_update(struct rte_eth_dev *dev __rte_unused,
392 int wait_to_complete __rte_unused)
398 eth_rx_queue_setup(struct rte_eth_dev *dev,
399 uint16_t rx_queue_id,
400 uint16_t nb_rx_desc __rte_unused,
401 unsigned int socket_id __rte_unused,
402 const struct rte_eth_rxconf *rx_conf __rte_unused,
403 struct rte_mempool *mb_pool)
405 struct pmd_internals *internals = dev->data->dev_private;
406 struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
407 unsigned int buf_size, data_size;
409 pkt_q->mb_pool = mb_pool;
411 /* Now get the space available for data in the mbuf */
412 buf_size = rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
413 RTE_PKTMBUF_HEADROOM;
414 data_size = internals->req.tp_frame_size;
415 data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
417 if (data_size > buf_size) {
419 "%s: %d bytes will not fit in mbuf (%d bytes)",
420 dev->device->name, data_size, buf_size);
424 dev->data->rx_queues[rx_queue_id] = pkt_q;
425 pkt_q->in_port = dev->data->port_id;
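/*
 * The check above ensures a whole ring frame fits in one mbuf: the usable
 * data per frame is tp_frame_size minus the TPACKET2 frame header, i.e. a
 * little under 2 KiB with the default 2048-byte frame size.
 */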
431 eth_tx_queue_setup(struct rte_eth_dev *dev,
432 uint16_t tx_queue_id,
433 uint16_t nb_tx_desc __rte_unused,
434 unsigned int socket_id __rte_unused,
435 const struct rte_eth_txconf *tx_conf __rte_unused)
438 struct pmd_internals *internals = dev->data->dev_private;
440 dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
445 eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
447 struct pmd_internals *internals = dev->data->dev_private;
448 struct ifreq ifr = { .ifr_mtu = mtu };
451 unsigned int data_size = internals->req.tp_frame_size -
457 s = socket(PF_INET, SOCK_DGRAM, 0);
461 strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
462 ret = ioctl(s, SIOCSIFMTU, &ifr);
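/*
 * The MTU is applied to the underlying kernel interface via SIOCSIFMTU on a
 * temporary datagram socket; data_size (ring frame size minus the TPACKET2
 * header) is the natural upper bound an MTU must respect so that a packet
 * still fits in a single ring frame.
 */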
472 eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
478 s = socket(PF_INET, SOCK_DGRAM, 0);
482 strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
483 if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
487 ifr.ifr_flags &= mask;
488 ifr.ifr_flags |= flags;
489 if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
499 eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
501 struct pmd_internals *internals = dev->data->dev_private;
503 return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
507 eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
509 struct pmd_internals *internals = dev->data->dev_private;
511 return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
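/*
 * eth_dev_change_flags() first ANDs the current interface flags with 'mask'
 * and then ORs in 'flags', so enable passes (IFF_PROMISC, ~0) to set the bit
 * and disable passes (0, ~IFF_PROMISC) to clear it.
 */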
514 static const struct eth_dev_ops ops = {
515 .dev_start = eth_dev_start,
516 .dev_stop = eth_dev_stop,
517 .dev_close = eth_dev_close,
518 .dev_configure = eth_dev_configure,
519 .dev_infos_get = eth_dev_info,
520 .mtu_set = eth_dev_mtu_set,
521 .promiscuous_enable = eth_dev_promiscuous_enable,
522 .promiscuous_disable = eth_dev_promiscuous_disable,
523 .rx_queue_setup = eth_rx_queue_setup,
524 .tx_queue_setup = eth_tx_queue_setup,
525 .rx_queue_release = eth_queue_release,
526 .tx_queue_release = eth_queue_release,
527 .link_update = eth_link_update,
528 .stats_get = eth_stats_get,
529 .stats_reset = eth_stats_reset,
533 * Opens an AF_PACKET socket
536 open_packet_iface(const char *key __rte_unused,
537 const char *value __rte_unused,
540 int *sockfd = extra_args;
542 /* Open an AF_PACKET socket... */
543 *sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
545 PMD_LOG(ERR, "Could not open AF_PACKET socket");
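/*
 * This kvargs callback (registered below for the "iface" argument) opens a
 * raw AF_PACKET socket capturing all protocols; the probe path later uses it
 * for the SIOCGIFINDEX/SIOCGIFHWADDR ioctls and closes it once the ethdev
 * has been created.
 */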
553 rte_pmd_init_internals(struct rte_vdev_device *dev,
555 const unsigned nb_queues,
556 unsigned int blocksize,
557 unsigned int blockcnt,
558 unsigned int framesize,
559 unsigned int framecnt,
560 unsigned int qdisc_bypass,
561 struct pmd_internals **internals,
562 struct rte_eth_dev **eth_dev,
563 struct rte_kvargs *kvlist)
565 const char *name = rte_vdev_device_name(dev);
566 const unsigned int numa_node = dev->device.numa_node;
567 struct rte_eth_dev_data *data = NULL;
568 struct rte_kvargs_pair *pair = NULL;
572 struct sockaddr_ll sockaddr;
573 struct tpacket_req *req;
574 struct pkt_rx_queue *rx_queue;
575 struct pkt_tx_queue *tx_queue;
576 int rc, tpver, discard;
578 unsigned int i, q, rdsize;
579 #if defined(PACKET_FANOUT)
583 for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
584 pair = &kvlist->pairs[k_idx];
585 if (strstr(pair->key, ETH_AF_PACKET_IFACE_ARG) != NULL)
590 "%s: no interface specified for AF_PACKET ethdev",
596 "%s: creating AF_PACKET-backed ethdev on numa socket %u",
599 *internals = rte_zmalloc_socket(name, sizeof(**internals),
601 if (*internals == NULL)
604 for (q = 0; q < nb_queues; q++) {
605 (*internals)->rx_queue[q].map = MAP_FAILED;
606 (*internals)->tx_queue[q].map = MAP_FAILED;
609 req = &((*internals)->req);
611 req->tp_block_size = blocksize;
612 req->tp_block_nr = blockcnt;
613 req->tp_frame_size = framesize;
614 req->tp_frame_nr = framecnt;
616 ifnamelen = strlen(pair->value);
617 if (ifnamelen < sizeof(ifr.ifr_name)) {
618 memcpy(ifr.ifr_name, pair->value, ifnamelen);
619 ifr.ifr_name[ifnamelen] = '\0';
622 "%s: I/F name too long (%s)",
626 if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
627 PMD_LOG_ERRNO(ERR, "%s: ioctl failed (SIOCGIFINDEX)", name);
630 (*internals)->if_name = strdup(pair->value);
631 if ((*internals)->if_name == NULL)
633 (*internals)->if_index = ifr.ifr_ifindex;
635 if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
636 PMD_LOG_ERRNO(ERR, "%s: ioctl failed (SIOCGIFHWADDR)", name);
639 memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);
641 memset(&sockaddr, 0, sizeof(sockaddr));
642 sockaddr.sll_family = AF_PACKET;
643 sockaddr.sll_protocol = htons(ETH_P_ALL);
644 sockaddr.sll_ifindex = (*internals)->if_index;
646 #if defined(PACKET_FANOUT)
647 fanout_arg = (getpid() ^ (*internals)->if_index) & 0xffff;
648 fanout_arg |= (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16;
649 #if defined(PACKET_FANOUT_FLAG_ROLLOVER)
650 fanout_arg |= PACKET_FANOUT_FLAG_ROLLOVER << 16;
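/*
 * The PACKET_FANOUT argument packs a 16-bit group id (derived here from the
 * pid and interface index) into the low half and the fanout mode/flags into
 * the high half; all queue sockets join the same group, so the kernel
 * hash-distributes flows across the queues.
 */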
654 for (q = 0; q < nb_queues; q++) {
655 /* Open an AF_PACKET socket for this queue... */
656 qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
659 "%s: could not open AF_PACKET socket",
665 rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION,
666 &tpver, sizeof(tpver));
669 "%s: could not set PACKET_VERSION on AF_PACKET socket for %s",
675 rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS,
676 &discard, sizeof(discard));
679 "%s: could not set PACKET_LOSS on AF_PACKET socket for %s",
684 #if defined(PACKET_QDISC_BYPASS)
685 rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
686 &qdisc_bypass, sizeof(qdisc_bypass));
689 "%s: could not set PACKET_QDISC_BYPASS on AF_PACKET socket for %s",
694 RTE_SET_USED(qdisc_bypass);
697 rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
700 "%s: could not set PACKET_RX_RING on AF_PACKET socket for %s",
705 rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req));
708 "%s: could not set PACKET_TX_RING on AF_PACKET "
709 "socket for %s", name, pair->value);
713 rx_queue = &((*internals)->rx_queue[q]);
714 rx_queue->framecount = req->tp_frame_nr;
716 rx_queue->map = mmap(NULL, 2 * req->tp_block_size * req->tp_block_nr,
717 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
719 if (rx_queue->map == MAP_FAILED) {
721 "%s: call to mmap failed on AF_PACKET socket for %s",
726 /* rdsize is the same for both Tx and Rx */
727 rdsize = req->tp_frame_nr * sizeof(*(rx_queue->rd));
729 rx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
730 if (rx_queue->rd == NULL)
732 for (i = 0; i < req->tp_frame_nr; ++i) {
733 rx_queue->rd[i].iov_base = rx_queue->map + (i * framesize);
734 rx_queue->rd[i].iov_len = req->tp_frame_size;
736 rx_queue->sockfd = qsockfd;
738 tx_queue = &((*internals)->tx_queue[q]);
739 tx_queue->framecount = req->tp_frame_nr;
740 tx_queue->frame_data_size = req->tp_frame_size;
741 tx_queue->frame_data_size -= TPACKET2_HDRLEN -
742 sizeof(struct sockaddr_ll);
744 tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr;
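/*
 * A single mmap() covers both rings (hence the factor of two above): the
 * kernel lays out the PACKET_RX_RING first and the PACKET_TX_RING directly
 * after it, so the TX ring starts one ring-size past the RX mapping.
 */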
746 tx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
747 if (tx_queue->rd == NULL)
749 for (i = 0; i < req->tp_frame_nr; ++i) {
750 tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize);
751 tx_queue->rd[i].iov_len = req->tp_frame_size;
753 tx_queue->sockfd = qsockfd;
755 rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr));
758 "%s: could not bind AF_PACKET socket to %s",
763 #if defined(PACKET_FANOUT)
764 rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT,
765 &fanout_arg, sizeof(fanout_arg));
768 "%s: could not set PACKET_FANOUT on AF_PACKET socket for %s",
775 /* reserve an ethdev entry */
776 *eth_dev = rte_eth_vdev_allocate(dev, 0);
777 if (*eth_dev == NULL)
781 * now put it all together
782 * - store queue data in internals,
783 * - store numa_node in eth_dev
784 * - point eth_dev_data to internals
785 * - and point eth_dev structure to new eth_dev_data structure
788 (*internals)->nb_queues = nb_queues;
790 data = (*eth_dev)->data;
791 data->dev_private = *internals;
792 data->nb_rx_queues = (uint16_t)nb_queues;
793 data->nb_tx_queues = (uint16_t)nb_queues;
794 data->dev_link = pmd_link;
795 data->mac_addrs = &(*internals)->eth_addr;
797 (*eth_dev)->dev_ops = &ops;
804 for (q = 0; q < nb_queues; q++) {
805 munmap((*internals)->rx_queue[q].map,
806 2 * req->tp_block_size * req->tp_block_nr);
808 rte_free((*internals)->rx_queue[q].rd);
809 rte_free((*internals)->tx_queue[q].rd);
810 if (((*internals)->rx_queue[q].sockfd != 0) &&
811 ((*internals)->rx_queue[q].sockfd != qsockfd))
812 close((*internals)->rx_queue[q].sockfd);
814 free((*internals)->if_name);
815 rte_free(*internals);
820 rte_eth_from_packet(struct rte_vdev_device *dev,
822 struct rte_kvargs *kvlist)
824 const char *name = rte_vdev_device_name(dev);
825 struct pmd_internals *internals = NULL;
826 struct rte_eth_dev *eth_dev = NULL;
827 struct rte_kvargs_pair *pair = NULL;
829 unsigned int blockcount;
830 unsigned int blocksize;
831 unsigned int framesize = DFLT_FRAME_SIZE;
832 unsigned int framecount = DFLT_FRAME_COUNT;
833 unsigned int qpairs = 1;
834 unsigned int qdisc_bypass = 1;
836 /* do some parameter checking */
840 blocksize = getpagesize();
843 * Walk arguments for configurable settings
845 for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
846 pair = &kvlist->pairs[k_idx];
847 if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) {
848 qpairs = atoi(pair->value);
850 qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
852 "%s: invalid qpairs value",
858 if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) {
859 blocksize = atoi(pair->value);
862 "%s: invalid blocksize value",
868 if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) {
869 framesize = atoi(pair->value);
872 "%s: invalid framesize value",
878 if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) {
879 framecount = atoi(pair->value);
882 "%s: invalid framecount value",
888 if (strstr(pair->key, ETH_AF_PACKET_QDISC_BYPASS_ARG) != NULL) {
889 qdisc_bypass = atoi(pair->value);
890 if (qdisc_bypass > 1) {
892 "%s: invalid bypass value",
900 if (framesize > blocksize) {
902 "%s: AF_PACKET MMAP frame size exceeds block size!",
907 blockcount = framecount / (blocksize / framesize);
910 "%s: invalid AF_PACKET MMAP parameters", name);
914 PMD_LOG(INFO, "%s: AF_PACKET MMAP parameters:", name);
915 PMD_LOG(INFO, "%s:\tblock size %d", name, blocksize);
916 PMD_LOG(INFO, "%s:\tblock count %d", name, blockcount);
917 PMD_LOG(INFO, "%s:\tframe size %d", name, framesize);
918 PMD_LOG(INFO, "%s:\tframe count %d", name, framecount);
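/*
 * Worked example with the defaults: 2048-byte frames, 512 frames and a
 * typical 4 KiB page size for blocksz give two frames per block, so
 * blockcount = 512 / 2 = 256 blocks.
 */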
920 if (rte_pmd_init_internals(dev, *sockfd, qpairs,
921 blocksize, blockcount,
922 framesize, framecount,
924 &internals, &eth_dev,
928 eth_dev->rx_pkt_burst = eth_af_packet_rx;
929 eth_dev->tx_pkt_burst = eth_af_packet_tx;
931 rte_eth_dev_probing_finish(eth_dev);
936 rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
939 struct rte_kvargs *kvlist;
941 struct rte_eth_dev *eth_dev;
942 const char *name = rte_vdev_device_name(dev);
944 PMD_LOG(INFO, "Initializing pmd_af_packet for %s", name);
946 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
947 eth_dev = rte_eth_dev_attach_secondary(name);
949 PMD_LOG(ERR, "Failed to probe %s", name);
952 /* TODO: request info from primary to set up Rx and Tx */
953 eth_dev->dev_ops = &ops;
954 eth_dev->device = &dev->device;
955 rte_eth_dev_probing_finish(eth_dev);
959 kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
960 if (kvlist == NULL) {
966 * If iface argument is passed we open the NICs and use them for
969 if (rte_kvargs_count(kvlist, ETH_AF_PACKET_IFACE_ARG) == 1) {
971 ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG,
972 &open_packet_iface, &sockfd);
977 if (dev->device.numa_node == SOCKET_ID_ANY)
978 dev->device.numa_node = rte_socket_id();
980 ret = rte_eth_from_packet(dev, &sockfd, kvlist);
981 close(sockfd); /* no longer needed */
984 rte_kvargs_free(kvlist);
989 rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
991 struct rte_eth_dev *eth_dev = NULL;
992 struct pmd_internals *internals;
993 struct tpacket_req *req;
996 PMD_LOG(INFO, "Closing AF_PACKET ethdev on numa socket %u",
1002 /* find the ethdev entry */
1003 eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1004 if (eth_dev == NULL)
1007 /* mac_addrs must not be freed alone because part of dev_private */
1008 eth_dev->data->mac_addrs = NULL;
1010 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1011 return rte_eth_dev_release_port(eth_dev);
1013 internals = eth_dev->data->dev_private;
1014 req = &internals->req;
1015 for (q = 0; q < internals->nb_queues; q++) {
1016 munmap(internals->rx_queue[q].map,
1017 2 * req->tp_block_size * req->tp_block_nr);
1018 rte_free(internals->rx_queue[q].rd);
1019 rte_free(internals->tx_queue[q].rd);
1021 free(internals->if_name);
1023 rte_eth_dev_release_port(eth_dev);
1028 static struct rte_vdev_driver pmd_af_packet_drv = {
1029 .probe = rte_pmd_af_packet_probe,
1030 .remove = rte_pmd_af_packet_remove,
1033 RTE_PMD_REGISTER_VDEV(net_af_packet, pmd_af_packet_drv);
1034 RTE_PMD_REGISTER_ALIAS(net_af_packet, eth_af_packet);
1035 RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
1041 "qdisc_bypass=<0|1>");
1043 RTE_INIT(af_packet_init_log)
1045 af_packet_logtype = rte_log_register("pmd.net.packet");
1046 if (af_packet_logtype >= 0)
1047 rte_log_set_level(af_packet_logtype, RTE_LOG_NOTICE);