/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014 John W. Linville <linville@tuxdriver.com>
 *
 *   Originally based upon librte_pmd_pcap code:
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   Copyright(c) 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_vdev.h>

#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <poll.h>
#define ETH_AF_PACKET_IFACE_ARG		"iface"
#define ETH_AF_PACKET_NUM_Q_ARG		"qpairs"
#define ETH_AF_PACKET_BLOCKSIZE_ARG	"blocksz"
#define ETH_AF_PACKET_FRAMESIZE_ARG	"framesz"
#define ETH_AF_PACKET_FRAMECOUNT_ARG	"framecnt"

#define DFLT_BLOCK_SIZE		(1 << 12)
#define DFLT_FRAME_SIZE		(1 << 11)
#define DFLT_FRAME_COUNT	(1 << 9)
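/*
 * Default ring geometry: 4 KiB blocks holding 2 KiB frames and 512 frames
 * per ring, i.e. two frames per block and 256 blocks by default (the block
 * count is derived below as framecount / (blocksize / framesize)).
 */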
#define RTE_PMD_AF_PACKET_MAX_RINGS 16
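/*
 * Per-queue state: each RX/TX queue pair is backed by its own AF_PACKET
 * socket and a single mmap()ed region holding the RX ring followed by the
 * TX ring; rd[] holds one iovec per frame slot in the ring.
 */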
struct pkt_rx_queue {
	int sockfd;

	struct iovec *rd;
	uint8_t *map;
	unsigned int framecount;
	unsigned int framenum;

	struct rte_mempool *mb_pool;
	uint16_t in_port;

	volatile unsigned long rx_pkts;
	volatile unsigned long err_pkts;
	volatile unsigned long rx_bytes;
};

struct pkt_tx_queue {
	int sockfd;
	unsigned int frame_data_size;

	struct iovec *rd;
	uint8_t *map;
	unsigned int framecount;
	unsigned int framenum;

	volatile unsigned long tx_pkts;
	volatile unsigned long err_pkts;
	volatile unsigned long tx_bytes;
};
struct pmd_internals {
	unsigned nb_queues;

	int if_index;
	char *if_name;
	struct ether_addr eth_addr;

	struct tpacket_req req;

	struct pkt_rx_queue rx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
	struct pkt_tx_queue tx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
};
static const char *valid_arguments[] = {
	ETH_AF_PACKET_IFACE_ARG,
	ETH_AF_PACKET_NUM_Q_ARG,
	ETH_AF_PACKET_BLOCKSIZE_ARG,
	ETH_AF_PACKET_FRAMESIZE_ARG,
	ETH_AF_PACKET_FRAMECOUNT_ARG,
	NULL
};
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG
};
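/*
 * RX burst callback: walk the TPACKET_V2 RX ring. A frame slot whose
 * tp_status has TP_STATUS_USER set belongs to user space; its payload is
 * copied into a freshly allocated mbuf and the slot is handed back to the
 * kernel by resetting tp_status to TP_STATUS_KERNEL.
 */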
static uint16_t
eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned i;
	struct tpacket2_hdr *ppd;
	struct rte_mbuf *mbuf;
	uint8_t *pbuf;
	struct pkt_rx_queue *pkt_q = queue;
	uint16_t num_rx = 0;
	unsigned long num_rx_bytes = 0;
	unsigned int framecount, framenum;

	if (unlikely(nb_pkts == 0))
		return 0;
	/*
	 * Reads the given number of packets from the AF_PACKET socket one by
	 * one and copies the packet data into a newly allocated mbuf.
	 */
	framecount = pkt_q->framecount;
	framenum = pkt_q->framenum;
	for (i = 0; i < nb_pkts; i++) {
		/* point at the next incoming frame */
		ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
		if ((ppd->tp_status & TP_STATUS_USER) == 0)
			break;

		/* allocate the next mbuf */
		mbuf = rte_pktmbuf_alloc(pkt_q->mb_pool);
		if (unlikely(mbuf == NULL))
			break;
		/* packet will fit in the mbuf, go ahead and receive it */
		rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) = ppd->tp_snaplen;
		pbuf = (uint8_t *) ppd + ppd->tp_mac;
		memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));
		/* check for vlan info */
		if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
			mbuf->vlan_tci = ppd->tp_vlan_tci;
			mbuf->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
		}
		/* release incoming frame and advance ring buffer */
		ppd->tp_status = TP_STATUS_KERNEL;
		if (++framenum >= framecount)
			framenum = 0;
		mbuf->port = pkt_q->in_port;

		/* account for the receive frame */
		bufs[i] = mbuf;
		num_rx++;
		num_rx_bytes += mbuf->pkt_len;
	}
	pkt_q->framenum = framenum;
	pkt_q->rx_pkts += num_rx;
	pkt_q->rx_bytes += num_rx_bytes;
	return num_rx;
}
/*
 * TX burst callback: copy each mbuf chain into the next free TPACKET_V2
 * TX ring slot on the underlying AF_PACKET socket, then kick the kernel
 * with a zero-length sendto() to transmit the queued frames.
 */
static uint16_t
eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct tpacket2_hdr *ppd;
	struct rte_mbuf *mbuf;
	uint8_t *pbuf;
	unsigned int framecount, framenum;
	struct pollfd pfd;
	struct pkt_tx_queue *pkt_q = queue;
	uint16_t num_tx = 0;
	unsigned long num_tx_bytes = 0;
	int i;

	if (unlikely(nb_pkts == 0))
		return 0;
	memset(&pfd, 0, sizeof(pfd));
	pfd.fd = pkt_q->sockfd;
	pfd.events = POLLOUT;
	pfd.revents = 0;
	framecount = pkt_q->framecount;
	framenum = pkt_q->framenum;
	ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
	for (i = 0; i < nb_pkts; i++) {
		mbuf = *bufs++;
		/* drop oversized packets */
		if (mbuf->pkt_len > pkt_q->frame_data_size) {
			rte_pktmbuf_free(mbuf);
			continue;
		}
		/* insert vlan info if necessary */
		if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			if (rte_vlan_insert(&mbuf)) {
				rte_pktmbuf_free(mbuf);
				continue;
			}
		}
		/* wait for the next frame slot to become available */
		if ((ppd->tp_status != TP_STATUS_AVAILABLE) &&
		    (poll(&pfd, 1, -1) < 0))
			break;
		/* copy the tx frame data */
		pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN -
			sizeof(struct sockaddr_ll);

		struct rte_mbuf *tmp_mbuf = mbuf;
		while (tmp_mbuf) {
			uint16_t data_len = rte_pktmbuf_data_len(tmp_mbuf);
			memcpy(pbuf, rte_pktmbuf_mtod(tmp_mbuf, void*), data_len);
			pbuf += data_len;
			tmp_mbuf = tmp_mbuf->next;
		}
		ppd->tp_len = mbuf->pkt_len;
		ppd->tp_snaplen = mbuf->pkt_len;

		/* hand the frame to the kernel and advance ring buffer */
		ppd->tp_status = TP_STATUS_SEND_REQUEST;
		if (++framenum >= framecount)
			framenum = 0;
		ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;

		num_tx++;
		num_tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}
	/* kick-off transmits */
	if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1) {
		/* error sending -- no packets transmitted */
		num_tx = 0;
		num_tx_bytes = 0;
	}

	pkt_q->framenum = framenum;
	pkt_q->tx_pkts += num_tx;
	pkt_q->err_pkts += i - num_tx;
	pkt_q->tx_bytes += num_tx_bytes;
	return i;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}
/*
 * This function gets called when the current port gets stopped.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned i;
	int sockfd;
	struct pmd_internals *internals = dev->data->dev_private;

	for (i = 0; i < internals->nb_queues; i++) {
		sockfd = internals->rx_queue[i].sockfd;
		if (sockfd != -1)
			close(sockfd);

		/* Prevent use after free in case tx fd == rx fd */
		if (sockfd != internals->tx_queue[i].sockfd) {
			sockfd = internals->tx_queue[i].sockfd;
			if (sockfd != -1)
				close(sockfd);
		}

		internals->rx_queue[i].sockfd = -1;
		internals->tx_queue[i].sockfd = -1;
	}

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
	dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
	dev_info->min_rx_bufsize = 0;
}
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, imax;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
	        internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < imax; i++) {
		igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
		igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
		rx_total += igb_stats->q_ipackets[i];
		rx_bytes_total += igb_stats->q_ibytes[i];
	}

	imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
	        internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < imax; i++) {
		igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
		igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
		igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
		tx_bytes_total += igb_stats->q_obytes[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->ibytes = rx_bytes_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
	igb_stats->obytes = tx_bytes_total;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < internal->nb_queues; i++) {
		internal->rx_queue[i].rx_pkts = 0;
		internal->rx_queue[i].rx_bytes = 0;
	}

	for (i = 0; i < internal->nb_queues; i++) {
		internal->tx_queue[i].tx_pkts = 0;
		internal->tx_queue[i].err_pkts = 0;
		internal->tx_queue[i].tx_bytes = 0;
	}
}
static void
eth_dev_close(struct rte_eth_dev *dev __rte_unused)
{
}
static void
eth_queue_release(void *q __rte_unused)
{
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
                   uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id __rte_unused,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
	unsigned int buf_size, data_size;

	pkt_q->mb_pool = mb_pool;

	/* Now get the space available for data in the mbuf */
	buf_size = rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
		RTE_PKTMBUF_HEADROOM;
	data_size = internals->req.tp_frame_size;
	data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);

	if (data_size > buf_size) {
		RTE_LOG(ERR, PMD,
			"%s: %d bytes will not fit in mbuf (%d bytes)\n",
			dev->device->name, data_size, buf_size);
		return -ENOMEM;
	}

	dev->data->rx_queues[rx_queue_id] = pkt_q;
	pkt_q->in_port = dev->data->port_id;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
                   uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id __rte_unused,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
	return 0;
}
static int
eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int ret;
	int s;
	unsigned int data_size = internals->req.tp_frame_size -
				 TPACKET2_HDRLEN -
				 sizeof(struct sockaddr_ll);

	/* the MTU cannot exceed the per-frame payload in the TX ring */
	if (mtu > data_size)
		return -EINVAL;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -EINVAL;

	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", internals->if_name);
	ret = ioctl(s, SIOCSIFMTU, &ifr);
	close(s);

	if (ret < 0)
		return -EINVAL;

	return 0;
}
static void
eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
{
	struct ifreq ifr;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return;

	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", if_name);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0)
		goto out;
	ifr.ifr_flags &= mask;
	ifr.ifr_flags |= flags;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0)
		goto out;
out:
	close(s);
}
static void
eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
}

static void
eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.mtu_set = eth_dev_mtu_set,
	.promiscuous_enable = eth_dev_promiscuous_enable,
	.promiscuous_disable = eth_dev_promiscuous_disable,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};
/*
 * Opens an AF_PACKET socket
 */
static int
open_packet_iface(const char *key __rte_unused,
                  const char *value __rte_unused,
                  void *extra_args)
{
	int *sockfd = extra_args;

	/* Open an AF_PACKET socket... */
	*sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (*sockfd == -1) {
		RTE_LOG(ERR, PMD, "Could not open AF_PACKET socket\n");
		return -1;
	}

	return 0;
}
static struct rte_vdev_driver pmd_af_packet_drv;
static int
rte_pmd_init_internals(struct rte_vdev_device *dev,
                       const int sockfd,
                       const unsigned nb_queues,
                       unsigned int blocksize,
                       unsigned int blockcnt,
                       unsigned int framesize,
                       unsigned int framecnt,
                       struct pmd_internals **internals,
                       struct rte_eth_dev **eth_dev,
                       struct rte_kvargs *kvlist)
{
	const char *name = rte_vdev_device_name(dev);
	const unsigned int numa_node = dev->device.numa_node;
	struct rte_eth_dev_data *data = NULL;
	struct rte_kvargs_pair *pair = NULL;
	struct ifreq ifr;
	size_t ifnamelen;
	unsigned k_idx;
	struct sockaddr_ll sockaddr;
	struct tpacket_req *req;
	struct pkt_rx_queue *rx_queue;
	struct pkt_tx_queue *tx_queue;
	int rc, tpver, discard;
	int qsockfd = -1;
	unsigned int i, q, rdsize;
#if defined(PACKET_FANOUT)
	int fanout_arg;
#endif
#if defined(PACKET_QDISC_BYPASS)
	int bypass;
#endif
	/* find the "iface" argument; it is required */
	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_AF_PACKET_IFACE_ARG) != NULL)
			break;
	}
	if (pair == NULL) {
		RTE_LOG(ERR, PMD,
			"%s: no interface specified for AF_PACKET ethdev\n",
		        name);
		goto error_early;
	}

	RTE_LOG(INFO, PMD,
		"%s: creating AF_PACKET-backed ethdev on numa socket %u\n",
		name, numa_node);
	/*
	 * now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error_early;

	*internals = rte_zmalloc_socket(name, sizeof(**internals),
	                                0, numa_node);
	if (*internals == NULL)
		goto error_early;
	for (q = 0; q < nb_queues; q++) {
		(*internals)->rx_queue[q].map = MAP_FAILED;
		(*internals)->tx_queue[q].map = MAP_FAILED;
	}
	req = &((*internals)->req);

	req->tp_block_size = blocksize;
	req->tp_block_nr = blockcnt;
	req->tp_frame_size = framesize;
	req->tp_frame_nr = framecnt;
	ifnamelen = strlen(pair->value);
	if (ifnamelen < sizeof(ifr.ifr_name)) {
		memcpy(ifr.ifr_name, pair->value, ifnamelen);
		ifr.ifr_name[ifnamelen] = '\0';
	} else {
		RTE_LOG(ERR, PMD,
			"%s: I/F name too long (%s)\n",
			name, pair->value);
		goto error_early;
	}
	if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
		RTE_LOG(ERR, PMD,
			"%s: ioctl failed (SIOCGIFINDEX)\n",
		        name);
		goto error_early;
	}
	(*internals)->if_name = strdup(pair->value);
	if ((*internals)->if_name == NULL)
		goto error_early;
	(*internals)->if_index = ifr.ifr_ifindex;
	if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
		RTE_LOG(ERR, PMD,
			"%s: ioctl failed (SIOCGIFHWADDR)\n",
		        name);
		goto error_early;
	}
	memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);
	memset(&sockaddr, 0, sizeof(sockaddr));
	sockaddr.sll_family = AF_PACKET;
	sockaddr.sll_protocol = htons(ETH_P_ALL);
	sockaddr.sll_ifindex = (*internals)->if_index;
#if defined(PACKET_FANOUT)
	fanout_arg = (getpid() ^ (*internals)->if_index) & 0xffff;
	fanout_arg |= (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16;
#if defined(PACKET_FANOUT_FLAG_ROLLOVER)
	fanout_arg |= PACKET_FANOUT_FLAG_ROLLOVER << 16;
#endif
#endif
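	/*
	 * PACKET_FANOUT groups the per-queue sockets so the kernel spreads
	 * incoming packets across them by flow hash: the low 16 bits of
	 * fanout_arg are the group id (derived from pid ^ ifindex), the
	 * high 16 bits the fanout mode flags set above.
	 */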
	for (q = 0; q < nb_queues; q++) {
		/* Open an AF_PACKET socket for this queue... */
		qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		if (qsockfd == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not open AF_PACKET socket\n",
			        name);
			return -1;
		}
		tpver = TPACKET_V2;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION,
				&tpver, sizeof(tpver));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_VERSION on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}
		discard = 1;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS,
				&discard, sizeof(discard));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_LOSS on "
			        "AF_PACKET socket for %s\n", name, pair->value);
			goto error;
		}
#if defined(PACKET_QDISC_BYPASS)
		bypass = 1;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
				&bypass, sizeof(bypass));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_QDISC_BYPASS "
			        "on AF_PACKET socket for %s\n", name,
			        pair->value);
			goto error;
		}
#endif
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_RX_RING on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_TX_RING on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}
		rx_queue = &((*internals)->rx_queue[q]);
		rx_queue->framecount = req->tp_frame_nr;

		rx_queue->map = mmap(NULL, 2 * req->tp_block_size * req->tp_block_nr,
				    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
				    qsockfd, 0);
		if (rx_queue->map == MAP_FAILED) {
			RTE_LOG(ERR, PMD,
				"%s: call to mmap failed on AF_PACKET socket for %s\n",
			        name, pair->value);
			goto error;
		}
		/* rdsize is same for both Tx and Rx */
		rdsize = req->tp_frame_nr * sizeof(*(rx_queue->rd));

		rx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
		if (rx_queue->rd == NULL)
			goto error;
		for (i = 0; i < req->tp_frame_nr; ++i) {
			rx_queue->rd[i].iov_base = rx_queue->map + (i * framesize);
			rx_queue->rd[i].iov_len = req->tp_frame_size;
		}
		rx_queue->sockfd = qsockfd;
		tx_queue = &((*internals)->tx_queue[q]);
		tx_queue->framecount = req->tp_frame_nr;
		tx_queue->frame_data_size = req->tp_frame_size;
		tx_queue->frame_data_size -= TPACKET2_HDRLEN -
			sizeof(struct sockaddr_ll);

		tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr;

		tx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
		if (tx_queue->rd == NULL)
			goto error;
		for (i = 0; i < req->tp_frame_nr; ++i) {
			tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize);
			tx_queue->rd[i].iov_len = req->tp_frame_size;
		}
		tx_queue->sockfd = qsockfd;
		rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not bind AF_PACKET socket to %s\n",
			        name, pair->value);
			goto error;
		}

#if defined(PACKET_FANOUT)
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT,
				&fanout_arg, sizeof(fanout_arg));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_FANOUT on AF_PACKET socket "
				"for %s\n", name, pair->value);
			goto error;
		}
#endif
	}
	/* reserve an ethdev entry */
	*eth_dev = rte_eth_vdev_allocate(dev, 0);
	if (*eth_dev == NULL)
		goto error;
	/*
	 * now put it all together
	 * - store queue data in internals,
	 * - store numa_node in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	(*internals)->nb_queues = nb_queues;

	rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
	data->dev_private = *internals;
	data->nb_rx_queues = (uint16_t)nb_queues;
	data->nb_tx_queues = (uint16_t)nb_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &(*internals)->eth_addr;

	(*eth_dev)->data = data;
	(*eth_dev)->dev_ops = &ops;
	(*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE;

	return 0;
error:
	if (qsockfd != -1)
		close(qsockfd);
	for (q = 0; q < nb_queues; q++) {
		munmap((*internals)->rx_queue[q].map,
		       2 * req->tp_block_size * req->tp_block_nr);

		rte_free((*internals)->rx_queue[q].rd);
		rte_free((*internals)->tx_queue[q].rd);
		if (((*internals)->rx_queue[q].sockfd != 0) &&
		    ((*internals)->rx_queue[q].sockfd != qsockfd))
			close((*internals)->rx_queue[q].sockfd);
	}
	free((*internals)->if_name);
	rte_free(*internals);
error_early:
	rte_free(data);
	return -1;
}
static int
rte_eth_from_packet(struct rte_vdev_device *dev,
                    int const *sockfd,
                    struct rte_kvargs *kvlist)
{
	const char *name = rte_vdev_device_name(dev);
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_kvargs_pair *pair = NULL;
	unsigned k_idx;
	unsigned int blockcount;
	unsigned int blocksize = DFLT_BLOCK_SIZE;
	unsigned int framesize = DFLT_FRAME_SIZE;
	unsigned int framecount = DFLT_FRAME_COUNT;
	unsigned int qpairs = 1;
	/* do some parameter checking */
	if (*sockfd < 0)
		return -1;

	/*
	 * Walk arguments for configurable settings
	 */
	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) {
			qpairs = atoi(pair->value);
			if (qpairs < 1 ||
			    qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
				RTE_LOG(ERR, PMD,
					"%s: invalid qpairs value\n",
				        name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) {
			blocksize = atoi(pair->value);
			if (!blocksize) {
				RTE_LOG(ERR, PMD,
					"%s: invalid blocksize value\n",
				        name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) {
			framesize = atoi(pair->value);
			if (!framesize) {
				RTE_LOG(ERR, PMD,
					"%s: invalid framesize value\n",
				        name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) {
			framecount = atoi(pair->value);
			if (!framecount) {
				RTE_LOG(ERR, PMD,
					"%s: invalid framecount value\n",
				        name);
				return -1;
			}
			continue;
		}
	}
	if (framesize > blocksize) {
		RTE_LOG(ERR, PMD,
			"%s: AF_PACKET MMAP frame size exceeds block size!\n",
		        name);
		return -1;
	}

	blockcount = framecount / (blocksize / framesize);
	if (!blockcount) {
		RTE_LOG(ERR, PMD,
			"%s: invalid AF_PACKET MMAP parameters\n", name);
		return -1;
	}
	RTE_LOG(INFO, PMD, "%s: AF_PACKET MMAP parameters:\n", name);
	RTE_LOG(INFO, PMD, "%s:\tblock size %d\n", name, blocksize);
	RTE_LOG(INFO, PMD, "%s:\tblock count %d\n", name, blockcount);
	RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize);
	RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount);
	if (rte_pmd_init_internals(dev, *sockfd, qpairs,
	                           blocksize, blockcount,
	                           framesize, framecount,
	                           &internals, &eth_dev,
	                           kvlist) < 0)
		return -1;

	eth_dev->rx_pkt_burst = eth_af_packet_rx;
	eth_dev->tx_pkt_burst = eth_af_packet_tx;

	return 0;
}
static int
rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
{
	int ret = 0;
	struct rte_kvargs *kvlist;
	int sockfd = -1;

	RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n",
		rte_vdev_device_name(dev));

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL) {
		ret = -1;
		goto exit;
	}
	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading / writing
	 */
	if (rte_kvargs_count(kvlist, ETH_AF_PACKET_IFACE_ARG) == 1) {

		ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG,
		                         &open_packet_iface, &sockfd);
		if (ret < 0)
			goto exit;
	}
	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	ret = rte_eth_from_packet(dev, &sockfd, kvlist);
	close(sockfd); /* no longer needed */

exit:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals;
	unsigned q;

	RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n",
			rte_socket_id());

	if (dev == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	internals = eth_dev->data->dev_private;
	for (q = 0; q < internals->nb_queues; q++) {
		rte_free(internals->rx_queue[q].rd);
		rte_free(internals->tx_queue[q].rd);
	}
	free(internals->if_name);

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_af_packet_drv = {
	.probe = rte_pmd_af_packet_probe,
	.remove = rte_pmd_af_packet_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_packet, pmd_af_packet_drv);
RTE_PMD_REGISTER_ALIAS(net_af_packet, eth_af_packet);
RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
	"iface=<string> "
	"qpairs=<int> "
	"blocksz=<int> "
	"framesz=<int> "
	"framecnt=<int>");
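/*
 * Example usage (illustrative; "eth0" and the core/memory-channel options
 * are placeholders, not part of this driver):
 *
 *   testpmd -l 0-3 -n 4 \
 *       --vdev=net_af_packet0,iface=eth0,qpairs=2,blocksz=4096,framesz=2048,framecnt=512 \
 *       -- -i
 *
 * Each qpair becomes one RX and one TX queue backed by its own AF_PACKET
 * socket bound to "iface".
 */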