/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014 John W. Linville <linville@tuxdriver.com>
 *
 *   Originally based upon librte_pmd_pcap code:
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   Copyright(c) 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_vdev.h>

#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <poll.h>

#define ETH_AF_PACKET_IFACE_ARG		"iface"
#define ETH_AF_PACKET_NUM_Q_ARG		"qpairs"
#define ETH_AF_PACKET_BLOCKSIZE_ARG	"blocksz"
#define ETH_AF_PACKET_FRAMESIZE_ARG	"framesz"
#define ETH_AF_PACKET_FRAMECOUNT_ARG	"framecnt"
#define ETH_AF_PACKET_QDISC_BYPASS_ARG	"qdisc_bypass"

#define DFLT_BLOCK_SIZE		(1 << 12)
#define DFLT_FRAME_SIZE		(1 << 11)
#define DFLT_FRAME_COUNT	(1 << 9)

#define RTE_PMD_AF_PACKET_MAX_RINGS 16
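
/*
 * Worked example for the defaults above (explanatory note, not part of the
 * upstream logic): with DFLT_BLOCK_SIZE = 4096, DFLT_FRAME_SIZE = 2048 and
 * DFLT_FRAME_COUNT = 512, each kernel block holds 4096 / 2048 = 2 frames,
 * so rte_eth_from_packet() derives blockcount = 512 / 2 = 256 blocks, and
 * each of the RX and TX rings maps 256 * 4096 bytes = 1 MiB per queue pair.
 */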

struct pkt_rx_queue {
	int sockfd;
	struct iovec *rd;
	uint8_t *map;
	unsigned int framecount;
	unsigned int framenum;
	struct rte_mempool *mb_pool;
	uint16_t in_port;
	volatile unsigned long rx_pkts;
	volatile unsigned long err_pkts;
	volatile unsigned long rx_bytes;
};

struct pkt_tx_queue {
	int sockfd;
	unsigned int frame_data_size;
	struct iovec *rd;
	uint8_t *map;
	unsigned int framecount;
	unsigned int framenum;
	volatile unsigned long tx_pkts;
	volatile unsigned long err_pkts;
	volatile unsigned long tx_bytes;
};

struct pmd_internals {
	unsigned int nb_queues;
	int if_index;
	char *if_name;
	struct ether_addr eth_addr;
	struct tpacket_req req;
	struct pkt_rx_queue rx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
	struct pkt_tx_queue tx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
};
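
/*
 * Layout note (explanatory note, not part of the upstream logic): each queue
 * pair shares one PACKET_MMAP region. rte_pmd_init_internals() mmap()s
 * 2 * tp_block_size * tp_block_nr bytes per socket, uses the first half as
 * the RX ring and points tx_queue->map at the second half; rd[] in each
 * queue is simply an array of per-frame iovecs into that mapping.
 */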

static const char *valid_arguments[] = {
	ETH_AF_PACKET_IFACE_ARG,
	ETH_AF_PACKET_NUM_Q_ARG,
	ETH_AF_PACKET_BLOCKSIZE_ARG,
	ETH_AF_PACKET_FRAMESIZE_ARG,
	ETH_AF_PACKET_FRAMECOUNT_ARG,
	ETH_AF_PACKET_QDISC_BYPASS_ARG,
	NULL
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG
};
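
/*
 * RX ring handshake, in brief (explanatory note, not part of the upstream
 * logic): the kernel marks a filled frame with TP_STATUS_USER in tp_status;
 * after copying the payload into an mbuf the PMD writes TP_STATUS_KERNEL
 * back so the slot can be reused, then advances framenum.
 */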

static uint16_t
eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	struct tpacket2_hdr *ppd;
	struct rte_mbuf *mbuf;
	uint8_t *pbuf;
	struct pkt_rx_queue *pkt_q = queue;
	uint16_t num_rx = 0;
	unsigned long num_rx_bytes = 0;
	unsigned int framecount, framenum;

	if (unlikely(nb_pkts == 0))
		return 0;

	/*
	 * Reads the given number of packets from the AF_PACKET socket one by
	 * one and copies the packet data into a newly allocated mbuf.
	 */
	framecount = pkt_q->framecount;
	framenum = pkt_q->framenum;
	for (i = 0; i < nb_pkts; i++) {
		/* point at the next incoming frame */
		ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
		if ((ppd->tp_status & TP_STATUS_USER) == 0)
			break;

		/* allocate the next mbuf */
		mbuf = rte_pktmbuf_alloc(pkt_q->mb_pool);
		if (unlikely(mbuf == NULL))
			break;

		/* packet will fit in the mbuf, go ahead and receive it */
		rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) = ppd->tp_snaplen;
		pbuf = (uint8_t *) ppd + ppd->tp_mac;
		memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));

		/* check for vlan info */
		if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
			mbuf->vlan_tci = ppd->tp_vlan_tci;
			mbuf->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
		}

		/* release incoming frame and advance ring buffer */
		ppd->tp_status = TP_STATUS_KERNEL;
		if (++framenum >= framecount)
			framenum = 0;
		mbuf->port = pkt_q->in_port;

		/* account for the receive frame */
		bufs[i] = mbuf;
		num_rx++;
		num_rx_bytes += mbuf->pkt_len;
	}
	pkt_q->framenum = framenum;
	pkt_q->rx_pkts += num_rx;
	pkt_q->rx_bytes += num_rx_bytes;
	return num_rx;
}
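
/*
 * TX path overview (explanatory note, not part of the upstream logic): each
 * mbuf is copied into the next free TX ring frame (tp_status ==
 * TP_STATUS_AVAILABLE, waiting in poll() if none is free), the frame is
 * marked TP_STATUS_SEND_REQUEST, and a final zero-length sendto() with
 * MSG_DONTWAIT asks the kernel to transmit everything queued so far.
 */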

/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct tpacket2_hdr *ppd;
	struct rte_mbuf *mbuf;
	uint8_t *pbuf;
	unsigned int framecount, framenum;
	struct pollfd pfd;
	struct pkt_tx_queue *pkt_q = queue;
	uint16_t num_tx = 0;
	unsigned long num_tx_bytes = 0;
	int i;

	if (unlikely(nb_pkts == 0))
		return 0;

	memset(&pfd, 0, sizeof(pfd));
	pfd.fd = pkt_q->sockfd;
	pfd.events = POLLOUT;
	pfd.revents = 0;

	framecount = pkt_q->framecount;
	framenum = pkt_q->framenum;
	ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
	for (i = 0; i < nb_pkts; i++) {
		mbuf = *bufs++;

		/* drop oversized packets */
		if (mbuf->pkt_len > pkt_q->frame_data_size) {
			rte_pktmbuf_free(mbuf);
			continue;
		}

		/* insert vlan info if necessary */
		if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			if (rte_vlan_insert(&mbuf)) {
				rte_pktmbuf_free(mbuf);
				continue;
			}
		}

		/* point at the next incoming frame */
		if ((ppd->tp_status != TP_STATUS_AVAILABLE) &&
		    (poll(&pfd, 1, -1) < 0))
			break;

		/* copy the tx frame data */
		pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN -
			sizeof(struct sockaddr_ll);

		struct rte_mbuf *tmp_mbuf = mbuf;
		while (tmp_mbuf) {
			uint16_t data_len = rte_pktmbuf_data_len(tmp_mbuf);
			memcpy(pbuf, rte_pktmbuf_mtod(tmp_mbuf, void*), data_len);
			pbuf += data_len;
			tmp_mbuf = tmp_mbuf->next;
		}

		ppd->tp_len = mbuf->pkt_len;
		ppd->tp_snaplen = mbuf->pkt_len;

		/* release incoming frame and advance ring buffer */
		ppd->tp_status = TP_STATUS_SEND_REQUEST;
		if (++framenum >= framecount)
			framenum = 0;
		ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;

		num_tx++;
		num_tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	/* kick-off transmits */
	if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1) {
		/* error sending -- no packets transmitted */
		num_tx = 0;
		num_tx_bytes = 0;
	}

	pkt_q->framenum = framenum;
	pkt_q->tx_pkts += num_tx;
	pkt_q->err_pkts += i - num_tx;
	pkt_q->tx_bytes += num_tx_bytes;
	return num_tx;
}
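
/*
 * Accounting note (explanatory note, not part of the upstream logic):
 * err_pkts counts the frames of this burst that never reached the wire --
 * oversized packets, failed VLAN insertion, or the whole burst when the
 * final sendto() fails -- i.e. the difference between the frames examined
 * (i) and those actually transmitted (num_tx).
 */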

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

/*
 * This function gets called when the current port gets stopped.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned i;
	int sockfd;
	struct pmd_internals *internals = dev->data->dev_private;

	for (i = 0; i < internals->nb_queues; i++) {
		sockfd = internals->rx_queue[i].sockfd;
		if (sockfd != -1)
			close(sockfd);

		/* Prevent use after free in case tx fd == rx fd */
		if (sockfd != internals->tx_queue[i].sockfd) {
			sockfd = internals->tx_queue[i].sockfd;
			if (sockfd != -1)
				close(sockfd);
		}

		internals->rx_queue[i].sockfd = -1;
		internals->tx_queue[i].sockfd = -1;
	}

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
	dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
	dev_info->min_rx_bufsize = 0;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, imax;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
	        internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < imax; i++) {
		igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
		igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
		rx_total += igb_stats->q_ipackets[i];
		rx_bytes_total += igb_stats->q_ibytes[i];
	}

	imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
	        internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < imax; i++) {
		igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
		igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
		igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
		tx_bytes_total += igb_stats->q_obytes[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->ibytes = rx_bytes_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
	igb_stats->obytes = tx_bytes_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < internal->nb_queues; i++) {
		internal->rx_queue[i].rx_pkts = 0;
		internal->rx_queue[i].rx_bytes = 0;
	}

	for (i = 0; i < internal->nb_queues; i++) {
		internal->tx_queue[i].tx_pkts = 0;
		internal->tx_queue[i].err_pkts = 0;
		internal->tx_queue[i].tx_bytes = 0;
	}
}

static void
eth_dev_close(struct rte_eth_dev *dev __rte_unused)
{
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
	unsigned int buf_size, data_size;

	pkt_q->mb_pool = mb_pool;

	/* Now get the space available for data in the mbuf */
	buf_size = rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
		RTE_PKTMBUF_HEADROOM;
	data_size = internals->req.tp_frame_size;
	data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);

	if (data_size > buf_size) {
		RTE_LOG(ERR, PMD,
			"%s: %d bytes will not fit in mbuf (%d bytes)\n",
			dev->device->name, data_size, buf_size);
		return -ENOMEM;
	}

	dev->data->rx_queues[rx_queue_id] = pkt_q;
	pkt_q->in_port = dev->data->port_id;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
	return 0;
}

static int
eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int ret;
	int s;
	unsigned int data_size = internals->req.tp_frame_size -
				 TPACKET2_HDRLEN -
				 sizeof(struct sockaddr_ll);

	if (mtu > data_size)
		return -EINVAL;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -EINVAL;

	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", internals->if_name);
	ret = ioctl(s, SIOCSIFMTU, &ifr);
	close(s);
	if (ret < 0)
		return -EINVAL;

	return 0;
}

static void
eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
{
	struct ifreq ifr;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return;

	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", if_name);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0)
		goto out;
	ifr.ifr_flags &= mask;
	ifr.ifr_flags |= flags;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0)
		goto out;
out:
	close(s);
}

static void
eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
}

static void
eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.mtu_set = eth_dev_mtu_set,
	.promiscuous_enable = eth_dev_promiscuous_enable,
	.promiscuous_disable = eth_dev_promiscuous_disable,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

/*
 * Opens an AF_PACKET socket
 */
static int
open_packet_iface(const char *key __rte_unused,
                  const char *value __rte_unused,
                  void *extra_args)
{
	int *sockfd = extra_args;

	/* Open an AF_PACKET socket... */
	*sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (*sockfd == -1) {
		RTE_LOG(ERR, PMD, "Could not open AF_PACKET socket\n");
		return -1;
	}

	return 0;
}

static struct rte_vdev_driver pmd_af_packet_drv;

static int
rte_pmd_init_internals(struct rte_vdev_device *dev,
                       const int sockfd,
                       const unsigned nb_queues,
                       unsigned int blocksize,
                       unsigned int blockcnt,
                       unsigned int framesize,
                       unsigned int framecnt,
                       unsigned int qdisc_bypass,
                       struct pmd_internals **internals,
                       struct rte_eth_dev **eth_dev,
                       struct rte_kvargs *kvlist)
{
	const char *name = rte_vdev_device_name(dev);
	const unsigned int numa_node = dev->device.numa_node;
	struct rte_eth_dev_data *data = NULL;
	struct rte_kvargs_pair *pair = NULL;
	struct ifreq ifr;
	size_t ifnamelen;
	unsigned k_idx;
	struct sockaddr_ll sockaddr;
	struct tpacket_req *req;
	struct pkt_rx_queue *rx_queue;
	struct pkt_tx_queue *tx_queue;
	int rc, tpver, discard;
	int qsockfd = -1;
	unsigned int i, q, rdsize;
#if defined(PACKET_FANOUT)
	int fanout_arg;
#endif

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_AF_PACKET_IFACE_ARG) != NULL)
			break;
	}
	if (pair == NULL) {
		RTE_LOG(ERR, PMD,
			"%s: no interface specified for AF_PACKET ethdev\n",
			name);
		goto error_early;
	}

	RTE_LOG(INFO, PMD,
		"%s: creating AF_PACKET-backed ethdev on numa socket %u\n",
		name, numa_node);

	/*
	 * now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error_early;

	*internals = rte_zmalloc_socket(name, sizeof(**internals),
	                                0, numa_node);
	if (*internals == NULL)
		goto error_early;

	for (q = 0; q < nb_queues; q++) {
		(*internals)->rx_queue[q].map = MAP_FAILED;
		(*internals)->tx_queue[q].map = MAP_FAILED;
	}

	req = &((*internals)->req);

	req->tp_block_size = blocksize;
	req->tp_block_nr = blockcnt;
	req->tp_frame_size = framesize;
	req->tp_frame_nr = framecnt;

	ifnamelen = strlen(pair->value);
	if (ifnamelen < sizeof(ifr.ifr_name)) {
		memcpy(ifr.ifr_name, pair->value, ifnamelen);
		ifr.ifr_name[ifnamelen] = '\0';
	} else {
		RTE_LOG(ERR, PMD,
			"%s: I/F name too long (%s)\n",
			name, pair->value);
		goto error_early;
	}

	if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
		RTE_LOG(ERR, PMD,
			"%s: ioctl failed (SIOCGIFINDEX)\n",
			name);
		goto error_early;
	}
	(*internals)->if_name = strdup(pair->value);
	if ((*internals)->if_name == NULL)
		goto error_early;
	(*internals)->if_index = ifr.ifr_ifindex;

	if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
		RTE_LOG(ERR, PMD,
			"%s: ioctl failed (SIOCGIFHWADDR)\n",
			name);
		goto error_early;
	}
	memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);

	memset(&sockaddr, 0, sizeof(sockaddr));
	sockaddr.sll_family = AF_PACKET;
	sockaddr.sll_protocol = htons(ETH_P_ALL);
	sockaddr.sll_ifindex = (*internals)->if_index;

#if defined(PACKET_FANOUT)
	fanout_arg = (getpid() ^ (*internals)->if_index) & 0xffff;
	fanout_arg |= (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16;
#if defined(PACKET_FANOUT_FLAG_ROLLOVER)
	fanout_arg |= PACKET_FANOUT_FLAG_ROLLOVER << 16;
#endif
#endif
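
	/*
	 * Fanout note (explanatory note, not part of the upstream logic):
	 * every per-queue socket opened below joins the same PACKET_FANOUT
	 * group (keyed on pid ^ ifindex), so the kernel spreads incoming
	 * flows by hash across the qpairs; the DEFRAG flag defragments
	 * packets before hashing and, where available, ROLLOVER falls back
	 * to another socket when one ring is full.
	 */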

	for (q = 0; q < nb_queues; q++) {
		/* Open an AF_PACKET socket for this queue... */
		qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		if (qsockfd == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not open AF_PACKET socket\n",
				name);
			return -1;
		}

		tpver = TPACKET_V2;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION,
				&tpver, sizeof(tpver));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_VERSION on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}

		discard = 1;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS,
				&discard, sizeof(discard));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_LOSS on "
				"AF_PACKET socket for %s\n", name, pair->value);
			goto error;
		}

#if defined(PACKET_QDISC_BYPASS)
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
				&qdisc_bypass, sizeof(qdisc_bypass));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_QDISC_BYPASS "
				"on AF_PACKET socket for %s\n", name,
				pair->value);
			goto error;
		}
#else
		RTE_SET_USED(qdisc_bypass);
#endif

		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_RX_RING on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}

		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_TX_RING on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}

		rx_queue = &((*internals)->rx_queue[q]);
		rx_queue->framecount = req->tp_frame_nr;

		rx_queue->map = mmap(NULL, 2 * req->tp_block_size * req->tp_block_nr,
				    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
				    qsockfd, 0);
		if (rx_queue->map == MAP_FAILED) {
			RTE_LOG(ERR, PMD,
				"%s: call to mmap failed on AF_PACKET socket for %s\n",
				name, pair->value);
			goto error;
		}

		/* rdsize is same for both Tx and Rx */
		rdsize = req->tp_frame_nr * sizeof(*(rx_queue->rd));

		rx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
		if (rx_queue->rd == NULL)
			goto error;
		for (i = 0; i < req->tp_frame_nr; ++i) {
			rx_queue->rd[i].iov_base = rx_queue->map + (i * framesize);
			rx_queue->rd[i].iov_len = req->tp_frame_size;
		}
		rx_queue->sockfd = qsockfd;

		tx_queue = &((*internals)->tx_queue[q]);
		tx_queue->framecount = req->tp_frame_nr;
		tx_queue->frame_data_size = req->tp_frame_size;
		tx_queue->frame_data_size -= TPACKET2_HDRLEN -
			sizeof(struct sockaddr_ll);

		tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr;

		tx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
		if (tx_queue->rd == NULL)
			goto error;
		for (i = 0; i < req->tp_frame_nr; ++i) {
			tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize);
			tx_queue->rd[i].iov_len = req->tp_frame_size;
		}
		tx_queue->sockfd = qsockfd;

		rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not bind AF_PACKET socket to %s\n",
				name, pair->value);
			goto error;
		}

#if defined(PACKET_FANOUT)
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT,
				&fanout_arg, sizeof(fanout_arg));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_FANOUT on AF_PACKET socket "
				"for %s\n", name, pair->value);
			goto error;
		}
#endif
	}

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_vdev_allocate(dev, 0);
	if (*eth_dev == NULL)
		goto error;

	/*
	 * now put it all together
	 * - store queue data in internals,
	 * - store numa_node in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	(*internals)->nb_queues = nb_queues;

	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
	data->dev_private = *internals;
	data->nb_rx_queues = (uint16_t)nb_queues;
	data->nb_tx_queues = (uint16_t)nb_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &(*internals)->eth_addr;

	(*eth_dev)->data = data;
	(*eth_dev)->dev_ops = &ops;
	(*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE;

	return 0;

error:
	if (qsockfd != -1)
		close(qsockfd);
	for (q = 0; q < nb_queues; q++) {
		munmap((*internals)->rx_queue[q].map,
		       2 * req->tp_block_size * req->tp_block_nr);

		rte_free((*internals)->rx_queue[q].rd);
		rte_free((*internals)->tx_queue[q].rd);
		if (((*internals)->rx_queue[q].sockfd != 0) &&
			((*internals)->rx_queue[q].sockfd != qsockfd))
			close((*internals)->rx_queue[q].sockfd);
	}
	free((*internals)->if_name);
	rte_free(*internals);

error_early:
	rte_free(data);
	return -1;
}

static int
rte_eth_from_packet(struct rte_vdev_device *dev,
                    int const *sockfd,
                    struct rte_kvargs *kvlist)
{
	const char *name = rte_vdev_device_name(dev);
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_kvargs_pair *pair = NULL;
	unsigned k_idx;
	unsigned int blockcount;
	unsigned int blocksize = DFLT_BLOCK_SIZE;
	unsigned int framesize = DFLT_FRAME_SIZE;
	unsigned int framecount = DFLT_FRAME_COUNT;
	unsigned int qpairs = 1;
	unsigned int qdisc_bypass = 1;

	/* do some parameter checking */
	if (*sockfd < 0)
		return -1;

	/*
	 * Walk arguments for configurable settings
	 */
	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) {
			qpairs = atoi(pair->value);
			if (qpairs < 1 ||
			    qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
				RTE_LOG(ERR, PMD,
					"%s: invalid qpairs value\n",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) {
			blocksize = atoi(pair->value);
			if (!blocksize) {
				RTE_LOG(ERR, PMD,
					"%s: invalid blocksize value\n",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) {
			framesize = atoi(pair->value);
			if (!framesize) {
				RTE_LOG(ERR, PMD,
					"%s: invalid framesize value\n",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) {
			framecount = atoi(pair->value);
			if (!framecount) {
				RTE_LOG(ERR, PMD,
					"%s: invalid framecount value\n",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_QDISC_BYPASS_ARG) != NULL) {
			qdisc_bypass = atoi(pair->value);
			if (qdisc_bypass > 1) {
				RTE_LOG(ERR, PMD,
					"%s: invalid bypass value\n",
					name);
				return -1;
			}
			continue;
		}
	}

	if (framesize > blocksize) {
		RTE_LOG(ERR, PMD,
			"%s: AF_PACKET MMAP frame size exceeds block size!\n",
			name);
		return -1;
	}

	blockcount = framecount / (blocksize / framesize);
	if (!blockcount) {
		RTE_LOG(ERR, PMD,
			"%s: invalid AF_PACKET MMAP parameters\n", name);
		return -1;
	}

	RTE_LOG(INFO, PMD, "%s: AF_PACKET MMAP parameters:\n", name);
	RTE_LOG(INFO, PMD, "%s:\tblock size %d\n", name, blocksize);
	RTE_LOG(INFO, PMD, "%s:\tblock count %d\n", name, blockcount);
	RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize);
	RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount);

	if (rte_pmd_init_internals(dev, *sockfd, qpairs,
	                           blocksize, blockcount,
	                           framesize, framecount,
	                           qdisc_bypass,
	                           &internals, &eth_dev,
	                           kvlist) < 0)
		return -1;

	eth_dev->rx_pkt_burst = eth_af_packet_rx;
	eth_dev->tx_pkt_burst = eth_af_packet_tx;

	return 0;
}
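
/*
 * Note (explanatory note, not part of the upstream logic): rx_pkt_burst and
 * tx_pkt_burst set above are the fast-path entry points; once the port is
 * started, rte_eth_rx_burst()/rte_eth_tx_burst() dispatch straight to
 * eth_af_packet_rx()/eth_af_packet_tx() for each configured queue pair.
 */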

static int
rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
{
	int ret = 0;
	struct rte_kvargs *kvlist;
	int sockfd = -1;

	RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n",
		rte_vdev_device_name(dev));

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL) {
		ret = -1;
		goto exit;
	}

	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading / writing
	 */
	if (rte_kvargs_count(kvlist, ETH_AF_PACKET_IFACE_ARG) == 1) {

		ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG,
		                         &open_packet_iface, &sockfd);
		if (ret < 0)
			goto exit;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	ret = rte_eth_from_packet(dev, &sockfd, kvlist);
	close(sockfd); /* no longer needed */

exit:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals;
	unsigned q;

	RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n",
		rte_socket_id());

	if (dev == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	internals = eth_dev->data->dev_private;
	for (q = 0; q < internals->nb_queues; q++) {
		rte_free(internals->rx_queue[q].rd);
		rte_free(internals->tx_queue[q].rd);
	}
	free(internals->if_name);

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_af_packet_drv = {
	.probe = rte_pmd_af_packet_probe,
	.remove = rte_pmd_af_packet_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_packet, pmd_af_packet_drv);
RTE_PMD_REGISTER_ALIAS(net_af_packet, eth_af_packet);
RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
	"iface=<string> "
	"qpairs=<int> "
	"blocksz=<int> "
	"framesz=<int> "
	"framecnt=<int> "
	"qdisc_bypass=<0|1>");