/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014 John W. Linville <linville@tuxdriver.com>
 * Originally based upon librte_pmd_pcap code:
 * Copyright(c) 2010-2015 Intel Corporation.
 * Copyright(c) 2014 6WIND S.A.
 */
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>

#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <net/if.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <poll.h>
#define ETH_AF_PACKET_IFACE_ARG		"iface"
#define ETH_AF_PACKET_NUM_Q_ARG		"qpairs"
#define ETH_AF_PACKET_BLOCKSIZE_ARG	"blocksz"
#define ETH_AF_PACKET_FRAMESIZE_ARG	"framesz"
#define ETH_AF_PACKET_FRAMECOUNT_ARG	"framecnt"
#define ETH_AF_PACKET_QDISC_BYPASS_ARG	"qdisc_bypass"
#define DFLT_BLOCK_SIZE		(1 << 12)
#define DFLT_FRAME_SIZE		(1 << 11)
#define DFLT_FRAME_COUNT	(1 << 9)

#define RTE_PMD_AF_PACKET_MAX_RINGS 16
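
/*
 * With the defaults above a TPACKET_V2 block is 4 KiB and a frame is 2 KiB,
 * so each block holds two frames and the default ring of 512 frames maps
 * 512 * 2 KiB = 1 MiB of kernel ring memory per direction on every queue.
 */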
struct pkt_rx_queue {
	int sockfd;
	struct iovec *rd;
	uint8_t *map;
	unsigned int framecount;
	unsigned int framenum;
	struct rte_mempool *mb_pool;
	uint16_t in_port;
	volatile unsigned long rx_pkts;
	volatile unsigned long err_pkts;
	volatile unsigned long rx_bytes;
};
struct pkt_tx_queue {
	int sockfd;
	unsigned int frame_data_size;
	struct iovec *rd;
	uint8_t *map;
	unsigned int framecount;
	unsigned int framenum;
	volatile unsigned long tx_pkts;
	volatile unsigned long err_pkts;
	volatile unsigned long tx_bytes;
};
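
/*
 * Each configured queue pair is backed by one AF_PACKET socket: the Rx and
 * Tx structures above share that socket's file descriptor and a single
 * mmap()ed region in which the kernel Rx ring is followed by the Tx ring.
 */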
struct pmd_internals {
	unsigned nb_queues;
	int if_index;
	char *if_name;
	struct ether_addr eth_addr;
	struct tpacket_req req;
	struct pkt_rx_queue rx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
	struct pkt_tx_queue tx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
};
static const char *valid_arguments[] = {
	ETH_AF_PACKET_IFACE_ARG,
	ETH_AF_PACKET_NUM_Q_ARG,
	ETH_AF_PACKET_BLOCKSIZE_ARG,
	ETH_AF_PACKET_FRAMESIZE_ARG,
	ETH_AF_PACKET_FRAMECOUNT_ARG,
	ETH_AF_PACKET_QDISC_BYPASS_ARG,
	NULL
};
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG
};
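
/*
 * AF_PACKET has no PHY to query, so the port advertises this fixed 10G
 * full-duplex link; eth_link_update() below leaves it unchanged and
 * dev_start/dev_stop simply toggle link_status.
 */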
static uint16_t
eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned i;
	struct tpacket2_hdr *ppd;
	struct rte_mbuf *mbuf;
	uint8_t *pbuf;
	struct pkt_rx_queue *pkt_q = queue;
	uint16_t num_rx = 0;
	unsigned long num_rx_bytes = 0;
	unsigned int framecount, framenum;

	if (unlikely(nb_pkts == 0))
		return 0;

	/*
	 * Reads the given number of packets from the AF_PACKET socket one by
	 * one and copies the packet data into a newly allocated mbuf.
	 */
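	/*
	 * Each ring slot starts with a struct tpacket2_hdr: tp_mac is the
	 * offset from that header to the captured Ethernet frame and
	 * tp_snaplen is the number of bytes the kernel stored for it.
	 */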
	framecount = pkt_q->framecount;
	framenum = pkt_q->framenum;
	for (i = 0; i < nb_pkts; i++) {
		/* point at the next incoming frame */
		ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
		if ((ppd->tp_status & TP_STATUS_USER) == 0)
			break;

		/* allocate the next mbuf */
		mbuf = rte_pktmbuf_alloc(pkt_q->mb_pool);
		if (unlikely(mbuf == NULL))
			break;

		/* packet will fit in the mbuf, go ahead and receive it */
		rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) = ppd->tp_snaplen;
		pbuf = (uint8_t *) ppd + ppd->tp_mac;
		memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));

		/* check for vlan info */
		if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
			mbuf->vlan_tci = ppd->tp_vlan_tci;
			mbuf->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
		}

		/* release incoming frame and advance ring buffer */
		ppd->tp_status = TP_STATUS_KERNEL;
		if (++framenum >= framecount)
			framenum = 0;
		mbuf->port = pkt_q->in_port;

		/* account for the receive frame */
		bufs[i] = mbuf;
		num_rx++;
		num_rx_bytes += mbuf->pkt_len;
	}
	pkt_q->framenum = framenum;
	pkt_q->rx_pkts += num_rx;
	pkt_q->rx_bytes += num_rx_bytes;
	return num_rx;
}
/*
 * Callback to handle sending packets through a real NIC.
 */
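
/*
 * The transmit path claims ring slots whose tp_status is TP_STATUS_AVAILABLE
 * (poll()ing when none is free), copies each mbuf chain into the slot, marks
 * it TP_STATUS_SEND_REQUEST and finally flushes the whole batch with a single
 * sendto() on the queue's socket.
 */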
static uint16_t
eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct tpacket2_hdr *ppd;
	struct rte_mbuf *mbuf;
	uint8_t *pbuf;
	unsigned int framecount, framenum;
	struct pollfd pfd;
	struct pkt_tx_queue *pkt_q = queue;
	uint16_t num_tx = 0;
	unsigned long num_tx_bytes = 0;
	int i;

	if (unlikely(nb_pkts == 0))
		return 0;

	memset(&pfd, 0, sizeof(pfd));
	pfd.fd = pkt_q->sockfd;
	pfd.events = POLLOUT;
	pfd.revents = 0;

	framecount = pkt_q->framecount;
	framenum = pkt_q->framenum;
	ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
	for (i = 0; i < nb_pkts; i++) {
		mbuf = *bufs++;

		/* drop oversized packets */
		if (mbuf->pkt_len > pkt_q->frame_data_size) {
			rte_pktmbuf_free(mbuf);
			continue;
		}

		/* insert vlan info if necessary */
		if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			if (rte_vlan_insert(&mbuf)) {
				rte_pktmbuf_free(mbuf);
				continue;
			}
		}

		/* point at the next incoming frame */
		if ((ppd->tp_status != TP_STATUS_AVAILABLE) &&
		    (poll(&pfd, 1, -1) < 0))
			break;

		/* copy the tx frame data */
		pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN -
			sizeof(struct sockaddr_ll);

		struct rte_mbuf *tmp_mbuf = mbuf;
		while (tmp_mbuf) {
			uint16_t data_len = rte_pktmbuf_data_len(tmp_mbuf);
			memcpy(pbuf, rte_pktmbuf_mtod(tmp_mbuf, void*), data_len);
			pbuf += data_len;
			tmp_mbuf = tmp_mbuf->next;
		}

		ppd->tp_len = mbuf->pkt_len;
		ppd->tp_snaplen = mbuf->pkt_len;

		/* release incoming frame and advance ring buffer */
		ppd->tp_status = TP_STATUS_SEND_REQUEST;
		if (++framenum >= framecount)
			framenum = 0;
		ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;

		num_tx++;
		num_tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	/* kick-off transmits */
	if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1) {
		/* error sending -- no packets transmitted */
		num_tx = 0;
		num_tx_bytes = 0;
	}

	pkt_q->framenum = framenum;
	pkt_q->tx_pkts += num_tx;
	pkt_q->err_pkts += i - num_tx;
	pkt_q->tx_bytes += num_tx_bytes;
	return i;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

/*
 * This function gets called when the current port gets stopped.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned i;
	int sockfd;
	struct pmd_internals *internals = dev->data->dev_private;

	for (i = 0; i < internals->nb_queues; i++) {
		sockfd = internals->rx_queue[i].sockfd;
		if (sockfd != -1)
			close(sockfd);

		/* Prevent use after free in case tx fd == rx fd */
		if (sockfd != internals->tx_queue[i].sockfd) {
			sockfd = internals->tx_queue[i].sockfd;
			if (sockfd != -1)
				close(sockfd);
		}

		internals->rx_queue[i].sockfd = -1;
		internals->tx_queue[i].sockfd = -1;
	}

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
	dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
	dev_info->min_rx_bufsize = 0;
}
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, imax;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
	        internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < imax; i++) {
		igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
		igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
		rx_total += igb_stats->q_ipackets[i];
		rx_bytes_total += igb_stats->q_ibytes[i];
	}

	imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
	        internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < imax; i++) {
		igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
		igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
		igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
		tx_bytes_total += igb_stats->q_obytes[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->ibytes = rx_bytes_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
	igb_stats->obytes = tx_bytes_total;
	return 0;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < internal->nb_queues; i++) {
		internal->rx_queue[i].rx_pkts = 0;
		internal->rx_queue[i].rx_bytes = 0;
	}

	for (i = 0; i < internal->nb_queues; i++) {
		internal->tx_queue[i].tx_pkts = 0;
		internal->tx_queue[i].err_pkts = 0;
		internal->tx_queue[i].tx_bytes = 0;
	}
}
static void
eth_dev_close(struct rte_eth_dev *dev __rte_unused)
{
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
	unsigned int buf_size, data_size;

	pkt_q->mb_pool = mb_pool;

	/* Now get the space available for data in the mbuf */
	buf_size = rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
		RTE_PKTMBUF_HEADROOM;
	data_size = internals->req.tp_frame_size;
	data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
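	/*
	 * The receive path copies up to tp_snaplen bytes of a ring frame into
	 * a single mbuf, so the pool's data room (minus headroom) must be
	 * able to hold a complete frame's data area.
	 */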
	if (data_size > buf_size) {
		RTE_LOG(ERR, PMD,
			"%s: %d bytes will not fit in mbuf (%d bytes)\n",
			dev->device->name, data_size, buf_size);
		return -ENOMEM;
	}

	dev->data->rx_queues[rx_queue_id] = pkt_q;
	pkt_q->in_port = dev->data->port_id;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
	return 0;
}
static int
eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int ret;
	int s;
	unsigned int data_size = internals->req.tp_frame_size -
				 TPACKET2_HDRLEN -
				 sizeof(struct sockaddr_ll);

	if (mtu > data_size)
		return -EINVAL;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -EINVAL;

	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", internals->if_name);
	ret = ioctl(s, SIOCSIFMTU, &ifr);
	close(s);

	if (ret < 0)
		return -EINVAL;

	return 0;
}
static void
eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
{
	struct ifreq ifr;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return;

	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", if_name);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0)
		goto out;
	ifr.ifr_flags &= mask;
	ifr.ifr_flags |= flags;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0)
		goto out;
out:
	close(s);
}
static void
eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
}

static void
eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.mtu_set = eth_dev_mtu_set,
	.promiscuous_enable = eth_dev_promiscuous_enable,
	.promiscuous_disable = eth_dev_promiscuous_disable,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
/*
 * Opens an AF_PACKET socket
 */
static int
open_packet_iface(const char *key __rte_unused,
		  const char *value __rte_unused,
		  void *extra_args)
{
	int *sockfd = extra_args;

	/* Open an AF_PACKET socket... */
	*sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (*sockfd == -1) {
		RTE_LOG(ERR, PMD, "Could not open AF_PACKET socket\n");
		return -1;
	}

	return 0;
}
static struct rte_vdev_driver pmd_af_packet_drv;
static int
rte_pmd_init_internals(struct rte_vdev_device *dev,
		       const int sockfd,
		       const unsigned nb_queues,
		       unsigned int blocksize,
		       unsigned int blockcnt,
		       unsigned int framesize,
		       unsigned int framecnt,
		       unsigned int qdisc_bypass,
		       struct pmd_internals **internals,
		       struct rte_eth_dev **eth_dev,
		       struct rte_kvargs *kvlist)
{
	const char *name = rte_vdev_device_name(dev);
	const unsigned int numa_node = dev->device.numa_node;
	struct rte_eth_dev_data *data = NULL;
	struct rte_kvargs_pair *pair = NULL;
	struct ifreq ifr;
	size_t ifnamelen;
	unsigned k_idx;
	struct sockaddr_ll sockaddr;
	struct tpacket_req *req;
	struct pkt_rx_queue *rx_queue;
	struct pkt_tx_queue *tx_queue;
	int rc, tpver, discard;
	int qsockfd = -1;
	unsigned int i, q, rdsize;
#if defined(PACKET_FANOUT)
	int fanout_arg;
#endif
	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_AF_PACKET_IFACE_ARG) != NULL)
			break;
	}
	if (pair == NULL) {
		RTE_LOG(ERR, PMD,
			"%s: no interface specified for AF_PACKET ethdev\n",
			name);
		goto error_early;
	}

	RTE_LOG(INFO, PMD,
		"%s: creating AF_PACKET-backed ethdev on numa socket %u\n",
		name, numa_node);
	/*
	 * now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error_early;

	*internals = rte_zmalloc_socket(name, sizeof(**internals),
					0, numa_node);
	if (*internals == NULL)
		goto error_early;

	for (q = 0; q < nb_queues; q++) {
		(*internals)->rx_queue[q].map = MAP_FAILED;
		(*internals)->tx_queue[q].map = MAP_FAILED;
	}
	req = &((*internals)->req);

	req->tp_block_size = blocksize;
	req->tp_block_nr = blockcnt;
	req->tp_frame_size = framesize;
	req->tp_frame_nr = framecnt;

	ifnamelen = strlen(pair->value);
	if (ifnamelen < sizeof(ifr.ifr_name)) {
		memcpy(ifr.ifr_name, pair->value, ifnamelen);
		ifr.ifr_name[ifnamelen] = '\0';
	} else {
		RTE_LOG(ERR, PMD,
			"%s: I/F name too long (%s)\n",
			name, pair->value);
		goto error_early;
	}
	if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
		RTE_LOG(ERR, PMD,
			"%s: ioctl failed (SIOCGIFINDEX)\n",
			name);
		goto error_early;
	}
	(*internals)->if_name = strdup(pair->value);
	if ((*internals)->if_name == NULL)
		goto error_early;
	(*internals)->if_index = ifr.ifr_ifindex;

	if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
		RTE_LOG(ERR, PMD,
			"%s: ioctl failed (SIOCGIFHWADDR)\n",
			name);
		goto error_early;
	}
	memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);
	memset(&sockaddr, 0, sizeof(sockaddr));
	sockaddr.sll_family = AF_PACKET;
	sockaddr.sll_protocol = htons(ETH_P_ALL);
	sockaddr.sll_ifindex = (*internals)->if_index;
#if defined(PACKET_FANOUT)
	fanout_arg = (getpid() ^ (*internals)->if_index) & 0xffff;
	fanout_arg |= (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16;
#if defined(PACKET_FANOUT_FLAG_ROLLOVER)
	fanout_arg |= PACKET_FANOUT_FLAG_ROLLOVER << 16;
#endif
#endif
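
	/*
	 * All of the per-queue sockets opened below join the same fanout
	 * group (keyed on pid ^ ifindex), so the kernel hashes incoming
	 * flows across the queue sockets instead of duplicating traffic.
	 */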
	for (q = 0; q < nb_queues; q++) {
		/* Open an AF_PACKET socket for this queue... */
		qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		if (qsockfd == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not open AF_PACKET socket\n",
				name);
			return -1;
		}

		tpver = TPACKET_V2;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION,
				&tpver, sizeof(tpver));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_VERSION on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}
		discard = 1;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS,
				&discard, sizeof(discard));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_LOSS on "
				"AF_PACKET socket for %s\n", name, pair->value);
			goto error;
		}
#if defined(PACKET_QDISC_BYPASS)
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
				&qdisc_bypass, sizeof(qdisc_bypass));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_QDISC_BYPASS "
				"on AF_PACKET socket for %s\n", name,
				pair->value);
			goto error;
		}
#else
		RTE_SET_USED(qdisc_bypass);
#endif
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_RX_RING on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}

		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_TX_RING on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}
		rx_queue = &((*internals)->rx_queue[q]);
		rx_queue->framecount = req->tp_frame_nr;

		rx_queue->map = mmap(NULL, 2 * req->tp_block_size * req->tp_block_nr,
				    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
				    qsockfd, 0);
		if (rx_queue->map == MAP_FAILED) {
			RTE_LOG(ERR, PMD,
				"%s: call to mmap failed on AF_PACKET socket for %s\n",
				name, pair->value);
			goto error;
		}
		/* rdsize is same for both Tx and Rx */
		rdsize = req->tp_frame_nr * sizeof(*(rx_queue->rd));

		rx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
		if (rx_queue->rd == NULL)
			goto error;
		for (i = 0; i < req->tp_frame_nr; ++i) {
			rx_queue->rd[i].iov_base = rx_queue->map + (i * framesize);
			rx_queue->rd[i].iov_len = req->tp_frame_size;
		}
		rx_queue->sockfd = qsockfd;
		tx_queue = &((*internals)->tx_queue[q]);
		tx_queue->framecount = req->tp_frame_nr;
		tx_queue->frame_data_size = req->tp_frame_size;
		tx_queue->frame_data_size -= TPACKET2_HDRLEN -
			sizeof(struct sockaddr_ll);

		tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr;

		tx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
		if (tx_queue->rd == NULL)
			goto error;
		for (i = 0; i < req->tp_frame_nr; ++i) {
			tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize);
			tx_queue->rd[i].iov_len = req->tp_frame_size;
		}
		tx_queue->sockfd = qsockfd;
		rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not bind AF_PACKET socket to %s\n",
				name, pair->value);
			goto error;
		}

#if defined(PACKET_FANOUT)
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT,
				&fanout_arg, sizeof(fanout_arg));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_FANOUT on AF_PACKET socket "
				"for %s\n", name, pair->value);
			goto error;
		}
#endif
	}
	/* reserve an ethdev entry */
	*eth_dev = rte_eth_vdev_allocate(dev, 0);
	if (*eth_dev == NULL)
		goto error;

	/*
	 * now put it all together
	 * - store queue data in internals,
	 * - store numa_node in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	(*internals)->nb_queues = nb_queues;

	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
	data->dev_private = *internals;
	data->nb_rx_queues = (uint16_t)nb_queues;
	data->nb_tx_queues = (uint16_t)nb_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &(*internals)->eth_addr;

	(*eth_dev)->data = data;
	(*eth_dev)->dev_ops = &ops;

	return 0;

error:
	if (qsockfd != -1)
		close(qsockfd);
	for (q = 0; q < nb_queues; q++) {
		munmap((*internals)->rx_queue[q].map,
		       2 * req->tp_block_size * req->tp_block_nr);

		rte_free((*internals)->rx_queue[q].rd);
		rte_free((*internals)->tx_queue[q].rd);
		if (((*internals)->rx_queue[q].sockfd != 0) &&
			((*internals)->rx_queue[q].sockfd != qsockfd))
			close((*internals)->rx_queue[q].sockfd);
	}
	free((*internals)->if_name);
	rte_free(*internals);
error_early:
	rte_free(data);
	return -1;
}
static int
rte_eth_from_packet(struct rte_vdev_device *dev,
		    const int *sockfd,
		    struct rte_kvargs *kvlist)
{
	const char *name = rte_vdev_device_name(dev);
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_kvargs_pair *pair = NULL;
	unsigned k_idx;
	unsigned int blockcount;
	unsigned int blocksize = DFLT_BLOCK_SIZE;
	unsigned int framesize = DFLT_FRAME_SIZE;
	unsigned int framecount = DFLT_FRAME_COUNT;
	unsigned int qpairs = 1;
	unsigned int qdisc_bypass = 1;
	/* do some parameter checking */
	if (*sockfd < 0)
		return -1;

	/*
	 * Walk arguments for configurable settings
	 */
	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) {
			qpairs = atoi(pair->value);
			if (qpairs < 1 ||
			    qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
				RTE_LOG(ERR, PMD,
					"%s: invalid qpairs value\n",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) {
			blocksize = atoi(pair->value);
			if (!blocksize) {
				RTE_LOG(ERR, PMD,
					"%s: invalid blocksize value\n",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) {
			framesize = atoi(pair->value);
			if (!framesize) {
				RTE_LOG(ERR, PMD,
					"%s: invalid framesize value\n",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) {
			framecount = atoi(pair->value);
			if (!framecount) {
				RTE_LOG(ERR, PMD,
					"%s: invalid framecount value\n",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_QDISC_BYPASS_ARG) != NULL) {
			qdisc_bypass = atoi(pair->value);
			if (qdisc_bypass > 1) {
				RTE_LOG(ERR, PMD,
					"%s: invalid bypass value\n",
					name);
				return -1;
			}
			continue;
		}
	}

	if (framesize > blocksize) {
		RTE_LOG(ERR, PMD,
			"%s: AF_PACKET MMAP frame size exceeds block size!\n",
			name);
		return -1;
	}

	blockcount = framecount / (blocksize / framesize);
	if (!blockcount) {
		RTE_LOG(ERR, PMD,
			"%s: invalid AF_PACKET MMAP parameters\n", name);
		return -1;
	}
	RTE_LOG(INFO, PMD, "%s: AF_PACKET MMAP parameters:\n", name);
	RTE_LOG(INFO, PMD, "%s:\tblock size %d\n", name, blocksize);
	RTE_LOG(INFO, PMD, "%s:\tblock count %d\n", name, blockcount);
	RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize);
	RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount);

	if (rte_pmd_init_internals(dev, *sockfd, qpairs,
				   blocksize, blockcount,
				   framesize, framecount,
				   qdisc_bypass,
				   &internals, &eth_dev,
				   kvlist) < 0)
		return -1;

	eth_dev->rx_pkt_burst = eth_af_packet_rx;
	eth_dev->tx_pkt_burst = eth_af_packet_tx;

	return 0;
}
static int
rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
{
	int ret = 0;
	struct rte_kvargs *kvlist;
	int sockfd = -1;

	RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n",
		rte_vdev_device_name(dev));

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL) {
		ret = -1;
		goto exit;
	}

	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading / writing
	 */
	if (rte_kvargs_count(kvlist, ETH_AF_PACKET_IFACE_ARG) == 1) {

		ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG,
					 &open_packet_iface, &sockfd);
		if (ret < 0)
			goto exit;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	ret = rte_eth_from_packet(dev, &sockfd, kvlist);
	close(sockfd); /* no longer needed */

exit:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals;
	unsigned q;

	RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n",
		rte_socket_id());

	if (dev == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	internals = eth_dev->data->dev_private;
	for (q = 0; q < internals->nb_queues; q++) {
		rte_free(internals->rx_queue[q].rd);
		rte_free(internals->tx_queue[q].rd);
	}
	free(internals->if_name);

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_af_packet_drv = {
	.probe = rte_pmd_af_packet_probe,
	.remove = rte_pmd_af_packet_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_packet, pmd_af_packet_drv);
RTE_PMD_REGISTER_ALIAS(net_af_packet, eth_af_packet);
RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
	"iface=<string> "
	"qpairs=<int> "
	"blocksz=<int> "
	"framesz=<int> "
	"framecnt=<int> "
	"qdisc_bypass=<0|1>");