/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_net.h>
#include <rte_debug.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/uio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <linux/if_ether.h>
#include <linux/version.h>
#include <fcntl.h>

#include <rte_eth_tap.h>
#include <tap_flow.h>
#include <tap_netlink.h>
#include <tap_tcmsgs.h>
/* Linux based path to the TUN device */
#define TUN_TAP_DEV_PATH	"/dev/net/tun"
#define DEFAULT_TAP_NAME	"dtap"

#define ETH_TAP_IFACE_ARG	"iface"
#define ETH_TAP_SPEED_ARG	"speed"
#define ETH_TAP_REMOTE_ARG	"remote"
#define ETH_TAP_MAC_ARG		"mac"
#define ETH_TAP_MAC_FIXED	"fixed"

#define FLOWER_KERNEL_VERSION KERNEL_VERSION(4, 2, 0)
#define FLOWER_VLAN_KERNEL_VERSION KERNEL_VERSION(4, 9, 0)
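
/*
 * These two thresholds gate rte_flow support: the TC "flower" classifier
 * this PMD drives through netlink appeared in Linux 4.2, and flower VLAN
 * matching in 4.9. tap_kernel_support() below compares the running kernel
 * release against them at probe time and sets the corresponding
 * flower_support / flower_vlan_support flags.
 */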
static struct rte_vdev_driver pmd_tap_drv;

static const char *valid_arguments[] = {
	ETH_TAP_IFACE_ARG,
	ETH_TAP_SPEED_ARG,
	ETH_TAP_REMOTE_ARG,
	ETH_TAP_MAC_ARG,
	NULL
};

static int tap_unit;	/* next DEFAULT_TAP_NAME unit number */

static volatile uint32_t tap_trigger;	/* Rx trigger */
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG
};

static void
tap_trigger_cb(int sig __rte_unused)
{
	/* Valid trigger values are nonzero */
	tap_trigger = (tap_trigger + 1) | 0x80000000;
}
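
/*
 * How the Rx trigger works: the tap file descriptor is switched to O_ASYNC
 * in tun_alloc() below, so the kernel raises SIGIO whenever packets become
 * readable. The handler above bumps the global tap_trigger counter (kept
 * nonzero by the high bit), and pmd_rx_burst() compares it against a
 * per-queue trigger_seen copy so that idle queues can return immediately
 * without issuing a readv() syscall.
 */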
/* Specifies on what netdevices the ioctl should be applied */
enum ioctl_mode {
	LOCAL_AND_REMOTE,
	LOCAL_ONLY,
	REMOTE_ONLY,
};

static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
/* Tun/Tap allocation routine
 *
 * name is the name of the interface to use, unless NULL, in which case the
 * host supplies one.
 */
static int
tun_alloc(struct pmd_internals *pmd)
{
	struct ifreq ifr;
	int fd = -1;
#ifdef IFF_MULTI_QUEUE
	unsigned int features;
#endif

	memset(&ifr, 0, sizeof(struct ifreq));

	/*
	 * Do not set IFF_NO_PI as packet information header will be needed
	 * to check if a received packet has been truncated.
	 */
	ifr.ifr_flags = IFF_TAP;
	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);

	RTE_LOG(DEBUG, PMD, "ifr_name '%s'\n", ifr.ifr_name);

	fd = open(TUN_TAP_DEV_PATH, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, PMD, "Unable to create TAP interface\n");
		goto error;
	}
#ifdef IFF_MULTI_QUEUE
	/* Grab the TUN features to verify we can work multi-queue */
	if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
		RTE_LOG(ERR, PMD, "TAP unable to get TUN/TAP features\n");
		goto error;
	}
	RTE_LOG(DEBUG, PMD, "  TAP Features %08x\n", features);

	if (features & IFF_MULTI_QUEUE) {
		RTE_LOG(DEBUG, PMD, "  Multi-queue support for %d queues\n",
			RTE_PMD_TAP_MAX_QUEUES);
		ifr.ifr_flags |= IFF_MULTI_QUEUE;
	} else
#endif
	{
		ifr.ifr_flags |= IFF_ONE_QUEUE;
		RTE_LOG(DEBUG, PMD, "  Single queue only support\n");
	}
	/* Set the TUN/TAP configuration and set the name if needed */
	if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
		RTE_LOG(WARNING, PMD,
			"Unable to set TUNSETIFF for %s\n",
			ifr.ifr_name);
		perror("TUNSETIFF");
		goto error;
	}

	/* Always set the file descriptor to non-blocking */
	if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
		RTE_LOG(WARNING, PMD,
			"Unable to set %s to nonblocking\n",
			ifr.ifr_name);
		perror("F_SETFL, NONBLOCK");
		goto error;
	}
	/* Set up trigger to optimize empty Rx bursts */
	errno = 0;
	do {
		struct sigaction sa;
		int flags = fcntl(fd, F_GETFL);

		if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
			break;
		if (sa.sa_handler != tap_trigger_cb) {
			/*
			 * Make sure SIGIO is not already taken. This is done
			 * as late as possible to leave the application a
			 * chance to set up its own signal handler first.
			 */
			if (sa.sa_handler != SIG_IGN &&
			    sa.sa_handler != SIG_DFL) {
				errno = EBUSY;
				break;
			}
			sa = (struct sigaction){
				.sa_flags = SA_RESTART,
				.sa_handler = tap_trigger_cb,
			};
			if (sigaction(SIGIO, &sa, NULL) == -1)
				break;
		}
		/* Enable SIGIO on file descriptor */
		fcntl(fd, F_SETFL, flags | O_ASYNC);
		fcntl(fd, F_SETOWN, getpid());
	} while (0);

	if (errno) {
		/* Disable trigger globally in case of error */
		tap_trigger = 0;
		RTE_LOG(WARNING, PMD, "Rx trigger disabled: %s\n",
			strerror(errno));
	}

	return fd;

error:
	if (fd > 0)
		close(fd);
	return -1;
}
/* Callback to handle the rx burst of packets to the correct interface and
 * file descriptor(s) in a multi-queue setup.
 */
static uint16_t
pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct rx_queue *rxq = queue;
	uint16_t num_rx;
	unsigned long num_rx_bytes = 0;
	uint32_t trigger = tap_trigger;

	if (trigger == rxq->trigger_seen)
		return 0;
	if (trigger)
		rxq->trigger_seen = trigger;
	rte_compiler_barrier();
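	/*
	 * Each Rx queue owns a pre-built iovec array: slot 0 always points
	 * at rxq->pi (the struct tun_pi prefix the kernel prepends, since
	 * IFF_NO_PI is deliberately not set in tun_alloc()), and every
	 * following slot maps the data room of one pre-allocated mbuf from
	 * rxq->pool. A single readv() below therefore pulls the packet info
	 * header plus the whole frame, scattered across segments whenever
	 * enable_scatter is on.
	 */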
	for (num_rx = 0; num_rx < nb_pkts; ) {
		struct rte_mbuf *mbuf = rxq->pool;
		struct rte_mbuf *seg = NULL;
		struct rte_mbuf *new_tail = NULL;
		uint16_t data_off = rte_pktmbuf_headroom(mbuf);
		int len;

		len = readv(rxq->fd, *rxq->iovecs,
			    1 + (rxq->rxmode->enable_scatter ?
				 rxq->nb_rx_desc : 1));
		if (len < (int)sizeof(struct tun_pi))
			break;

		/* Packet couldn't fit in the provided mbuf */
		if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
			rxq->stats.ierrors++;
			continue;
		}

		len -= sizeof(struct tun_pi);

		mbuf->pkt_len = len;
		mbuf->port = rxq->in_port;
		while (1) {
			struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(!buf)) {
				rxq->stats.rx_nombuf++;
				/* No new buf has been allocated: do nothing */
				if (!new_tail || !seg)
					goto end;

				seg->next = NULL;
				rte_pktmbuf_free(mbuf);

				goto end;
			}
			seg = seg ? seg->next : mbuf;
			if (rxq->pool == mbuf)
				rxq->pool = buf;
			if (new_tail)
				new_tail->next = buf;
			new_tail = buf;
			new_tail->next = seg->next;

			/* iovecs[0] is reserved for packet info (pi) */
			(*rxq->iovecs)[mbuf->nb_segs].iov_len =
				buf->buf_len - data_off;
			(*rxq->iovecs)[mbuf->nb_segs].iov_base =
				(char *)buf->buf_addr + data_off;

			seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
			seg->data_off = data_off;

			len -= seg->data_len;
			if (len <= 0)
				break;
			mbuf->nb_segs++;
			/* First segment has headroom, not the others */
			data_off = 0;
		}
		seg->next = NULL;
		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
						      RTE_PTYPE_ALL_MASK);

		/* account for the receive frame */
		bufs[num_rx++] = mbuf;
		num_rx_bytes += mbuf->pkt_len;
	}
end:
	rxq->stats.ipackets += num_rx;
	rxq->stats.ibytes += num_rx_bytes;

	return num_rx;
}
/* Callback to handle sending packets from the tap interface
 */
static uint16_t
pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct tx_queue *txq = queue;
	uint16_t num_tx = 0;
	unsigned long num_tx_bytes = 0;
	uint32_t max_size;
	int i;

	if (unlikely(nb_pkts == 0))
		return 0;

	max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
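	/*
	 * A frame may carry at most MTU bytes of payload plus the Ethernet
	 * header, the CRC and (presumably) a 4-byte VLAN tag; anything
	 * larger is dropped below and accounted in stats.errs.
	 */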
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = bufs[num_tx];
		struct iovec iovecs[mbuf->nb_segs + 1];
		struct tun_pi pi = { .flags = 0 };
		struct rte_mbuf *seg = mbuf;
		int n;
		int j;

		/* stats.errs will be incremented */
		if (rte_pktmbuf_pkt_len(mbuf) > max_size)
			break;
		iovecs[0].iov_base = &pi;
		iovecs[0].iov_len = sizeof(pi);
		for (j = 1; j <= mbuf->nb_segs; j++) {
			iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
			iovecs[j].iov_base =
				rte_pktmbuf_mtod(seg, void *);
			seg = seg->next;
		}
		/* copy the tx frame data */
		n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
		if (n <= 0)
			break;

		num_tx++;
		num_tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	txq->stats.opackets += num_tx;
	txq->stats.errs += nb_pkts - num_tx;
	txq->stats.obytes += num_tx_bytes;

	return num_tx;
}
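
/*
 * Note that writev() on the tap fd copies the frame into the kernel, so an
 * mbuf can be freed as soon as the call returns; there is no completion to
 * wait for. num_tx only counts fully written packets, and whatever is left
 * of the burst is reported through stats.errs above.
 */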
static const char *
tap_ioctl_req2str(unsigned long request)
{
	switch (request) {
	case SIOCSIFFLAGS:
		return "SIOCSIFFLAGS";
	case SIOCGIFFLAGS:
		return "SIOCGIFFLAGS";
	case SIOCGIFHWADDR:
		return "SIOCGIFHWADDR";
	case SIOCSIFHWADDR:
		return "SIOCSIFHWADDR";
	case SIOCSIFMTU:
		return "SIOCSIFMTU";
	}
	return "UNKNOWN";
}
static int
tap_ioctl(struct pmd_internals *pmd, unsigned long request,
	  struct ifreq *ifr, int set, enum ioctl_mode mode)
{
	short req_flags = ifr->ifr_flags;
	int remote = pmd->remote_if_index &&
		     (mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);

	if (!pmd->remote_if_index && mode == REMOTE_ONLY)
		return 0;
	/*
	 * If there is a remote netdevice, apply ioctl on it, then apply it on
	 * the tap netdevice.
	 */
apply:
	if (remote)
		snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->remote_iface);
	else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
		snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->name);
	switch (request) {
	case SIOCSIFFLAGS:
		/* fetch current flags to leave other flags untouched */
		if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
			goto error;
		if (set)
			ifr->ifr_flags |= req_flags;
		else
			ifr->ifr_flags &= ~req_flags;
		break;
	case SIOCGIFFLAGS:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
	case SIOCSIFMTU:
		break;
	default:
		RTE_ASSERT(!"unsupported request type: must not happen");
	}
	if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
		goto error;
	if (remote-- && mode == LOCAL_AND_REMOTE)
		goto apply;
	return 0;

error:
	RTE_LOG(DEBUG, PMD, "%s: %s(%s) failed: %s(%d)\n", ifr->ifr_name,
		__func__, tap_ioctl_req2str(request), strerror(errno), errno);
	return -errno;
}
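
/*
 * The requests above are issued on pmd->ioctl_sock, a plain AF_INET
 * datagram socket opened at device creation: Linux accepts netdevice
 * ioctls (SIOCSIFFLAGS, SIOCSIFMTU, ...) on any socket fd, while the tap
 * fd itself only takes TUN-specific requests such as TUNSETIFF.
 */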
static int
tap_link_set_down(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_UP };

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
}

static int
tap_link_set_up(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_UP };

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
}
static int
tap_dev_start(struct rte_eth_dev *dev)
{
	int err;

	err = tap_intr_handle_set(dev, 1);
	if (err)
		return err;
	return tap_link_set_up(dev);
}

/* This function gets called when the current port gets stopped.
 */
static void
tap_dev_stop(struct rte_eth_dev *dev)
{
	tap_intr_handle_set(dev, 0);
	tap_link_set_down(dev);
}

static int
tap_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static uint32_t
tap_dev_speed_capa(void)
{
	uint32_t speed = pmd_link.link_speed;
	uint32_t capa = 0;

	if (speed >= ETH_SPEED_NUM_10M)
		capa |= ETH_LINK_SPEED_10M;
	if (speed >= ETH_SPEED_NUM_100M)
		capa |= ETH_LINK_SPEED_100M;
	if (speed >= ETH_SPEED_NUM_1G)
		capa |= ETH_LINK_SPEED_1G;
	if (speed >= ETH_SPEED_NUM_2_5G)
		capa |= ETH_LINK_SPEED_2_5G;
	if (speed >= ETH_SPEED_NUM_5G)
		capa |= ETH_LINK_SPEED_5G;
	if (speed >= ETH_SPEED_NUM_10G)
		capa |= ETH_LINK_SPEED_10G;
	if (speed >= ETH_SPEED_NUM_20G)
		capa |= ETH_LINK_SPEED_20G;
	if (speed >= ETH_SPEED_NUM_25G)
		capa |= ETH_LINK_SPEED_25G;
	if (speed >= ETH_SPEED_NUM_40G)
		capa |= ETH_LINK_SPEED_40G;
	if (speed >= ETH_SPEED_NUM_50G)
		capa |= ETH_LINK_SPEED_50G;
	if (speed >= ETH_SPEED_NUM_56G)
		capa |= ETH_LINK_SPEED_56G;
	if (speed >= ETH_SPEED_NUM_100G)
		capa |= ETH_LINK_SPEED_100G;

	return capa;
}
static void
tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
	dev_info->max_rx_queues = internals->nb_queues;
	dev_info->max_tx_queues = internals->nb_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
	dev_info->speed_capa = tap_dev_speed_capa();
}
static void
tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
{
	unsigned int i, imax;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
	unsigned long rx_nombuf = 0, ierrors = 0;
	const struct pmd_internals *pmd = dev->data->dev_private;

	imax = (pmd->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
		pmd->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;

	for (i = 0; i < imax; i++) {
		tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
		tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
		rx_total += tap_stats->q_ipackets[i];
		rx_bytes_total += tap_stats->q_ibytes[i];
		rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
		ierrors += pmd->rxq[i].stats.ierrors;

		tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
		tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
		tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
		tx_total += tap_stats->q_opackets[i];
		tx_err_total += tap_stats->q_errors[i];
		tx_bytes_total += tap_stats->q_obytes[i];
	}

	tap_stats->ipackets = rx_total;
	tap_stats->ibytes = rx_bytes_total;
	tap_stats->ierrors = ierrors;
	tap_stats->rx_nombuf = rx_nombuf;
	tap_stats->opackets = tx_total;
	tap_stats->oerrors = tx_err_total;
	tap_stats->obytes = tx_bytes_total;
}
static void
tap_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	struct pmd_internals *pmd = dev->data->dev_private;

	for (i = 0; i < pmd->nb_queues; i++) {
		pmd->rxq[i].stats.ipackets = 0;
		pmd->rxq[i].stats.ibytes = 0;
		pmd->rxq[i].stats.ierrors = 0;
		pmd->rxq[i].stats.rx_nombuf = 0;

		pmd->txq[i].stats.opackets = 0;
		pmd->txq[i].stats.errs = 0;
		pmd->txq[i].stats.obytes = 0;
	}
}
static void
tap_dev_close(struct rte_eth_dev *dev)
{
	int i;
	struct pmd_internals *internals = dev->data->dev_private;

	tap_link_set_down(dev);
	tap_flow_flush(dev, NULL);
	tap_flow_implicit_flush(internals, NULL);

	for (i = 0; i < internals->nb_queues; i++) {
		if (internals->rxq[i].fd != -1)
			close(internals->rxq[i].fd);
		internals->rxq[i].fd = -1;
		internals->txq[i].fd = -1;
	}
}
static void
tap_rx_queue_release(void *queue)
{
	struct rx_queue *rxq = queue;

	if (rxq && (rxq->fd > 0)) {
		close(rxq->fd);
		rxq->fd = -1;
		rte_pktmbuf_free(rxq->pool);
		rte_free(rxq->iovecs);
		rxq->pool = NULL;
		rxq->iovecs = NULL;
	}
}

static void
tap_tx_queue_release(void *queue)
{
	struct tx_queue *txq = queue;

	if (txq && (txq->fd > 0)) {
		close(txq->fd);
		txq->fd = -1;
	}
}
static int
tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = 0 };

	if (pmd->remote_if_index) {
		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
		if (!(ifr.ifr_flags & IFF_UP) ||
		    !(ifr.ifr_flags & IFF_RUNNING)) {
			dev_link->link_status = ETH_LINK_DOWN;
			return 0;
		}
	}
	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
	dev_link->link_status =
		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
		 ETH_LINK_UP :
		 ETH_LINK_DOWN);
	return 0;
}
static void
tap_promisc_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

	dev->data->promiscuous = 1;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index)
		tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_promisc_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

	dev->data->promiscuous = 0;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index)
		tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_allmulti_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

	dev->data->all_multicast = 1;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index)
		tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
}

static void
tap_allmulti_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

	dev->data->all_multicast = 0;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index)
		tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
}
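
/*
 * In the four handlers above the flag change is applied with
 * LOCAL_AND_REMOTE and, when a remote netdevice is configured, paired
 * with an implicit TC rule (TAP_REMOTE_PROMISC / TAP_REMOTE_ALLMULTI),
 * presumably so that the extra traffic the remote now accepts is also
 * redirected to the tap.
 */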
static void
tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	enum ioctl_mode mode = LOCAL_ONLY;
	struct ifreq ifr;

	if (is_zero_ether_addr(mac_addr)) {
		RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
			dev->data->name);
		return;
	}
	/* Check the actual current MAC address on the tap netdevice */
	if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
		return;
	if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
			       mac_addr))
		return;
	/* Check the current MAC address on the remote */
	if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0)
		return;
	if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
				mac_addr))
		mode = LOCAL_AND_REMOTE;
	ifr.ifr_hwaddr.sa_family = AF_LOCAL;
	rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
	if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode) < 0)
		return;
	rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
	if (pmd->remote_if_index) {
		/* Replace MAC redirection rule after a MAC change */
		if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) {
			RTE_LOG(ERR, PMD,
				"%s: Couldn't delete MAC redirection rule\n",
				dev->data->name);
			return;
		}
		if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
			RTE_LOG(ERR, PMD,
				"%s: Couldn't add MAC redirection rule\n",
				dev->data->name);
	}
}
static int
tap_setup_queue(struct rte_eth_dev *dev,
		struct pmd_internals *internals,
		uint16_t qid)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct rx_queue *rx = &internals->rxq[qid];
	struct tx_queue *tx = &internals->txq[qid];
	int fd = rx->fd == -1 ? tx->fd : rx->fd;

	if (fd == -1) {
		RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n",
			pmd->name, qid);
		fd = tun_alloc(pmd);
		if (fd < 0) {
			RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n",
				pmd->name);
			return -1;
		}
	}

	rx->fd = fd;
	tx->fd = fd;
	tx->mtu = &dev->data->mtu;
	rx->rxmode = &dev->data->dev_conf.rxmode;

	return fd;
}
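
/*
 * Note: an Rx queue and the Tx queue with the same index share a single
 * tap fd (one fd per kernel queue). Whichever of tap_rx_queue_setup() or
 * tap_tx_queue_setup() runs first triggers tun_alloc(); the second one
 * reuses the existing descriptor.
 */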
static int
tap_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mp)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct rx_queue *rxq = &internals->rxq[rx_queue_id];
	struct rte_mbuf **tmp = &rxq->pool;
	long iov_max = sysconf(_SC_IOV_MAX);
	uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
	struct iovec (*iovecs)[nb_desc + 1];
	int data_off = RTE_PKTMBUF_HEADROOM;
	int ret = 0;
	int fd;
	int i;

	if ((rx_queue_id >= internals->nb_queues) || !mp) {
		RTE_LOG(WARNING, PMD,
			"nb_queues %d too small or mempool NULL\n",
			internals->nb_queues);
		return -1;
	}

	rxq->mp = mp;
	rxq->trigger_seen = 1; /* force initial burst */
	rxq->in_port = dev->data->port_id;
	rxq->nb_rx_desc = nb_desc;
	iovecs = rte_zmalloc_socket(dev->data->name, sizeof(*iovecs), 0,
				    socket_id);
	if (!iovecs) {
		RTE_LOG(WARNING, PMD,
			"%s: Couldn't allocate %d RX descriptors\n",
			dev->data->name, nb_desc);
		return -ENOMEM;
	}
	rxq->iovecs = iovecs;

	dev->data->rx_queues[rx_queue_id] = rxq;
	fd = tap_setup_queue(dev, internals, rx_queue_id);
	if (fd == -1) {
		ret = fd;
		goto error;
	}

	(*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
	(*rxq->iovecs)[0].iov_base = &rxq->pi;

	for (i = 1; i <= nb_desc; i++) {
		*tmp = rte_pktmbuf_alloc(rxq->mp);
		if (!*tmp) {
			RTE_LOG(WARNING, PMD,
				"%s: couldn't allocate memory for queue %d\n",
				dev->data->name, rx_queue_id);
			ret = -ENOMEM;
			goto error;
		}
		(*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
		(*rxq->iovecs)[i].iov_base =
			(char *)(*tmp)->buf_addr + data_off;
		data_off = 0;
		tmp = &(*tmp)->next;
	}

	RTE_LOG(DEBUG, PMD, "  RX TAP device name %s, qid %d on fd %d\n",
		internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);

	return 0;

error:
	rte_pktmbuf_free(rxq->pool);
	rxq->pool = NULL;
	rte_free(rxq->iovecs);
	rxq->iovecs = NULL;
	return ret;
}
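
/*
 * nb_desc is capped at _SC_IOV_MAX - 1 above because every descriptor
 * becomes one iovec of the single readv() call in pmd_rx_burst(), with
 * slot 0 reserved for the tun_pi header.
 */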
static int
tap_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int ret;

	if (tx_queue_id >= internals->nb_queues)
		return -1;

	dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
	ret = tap_setup_queue(dev, internals, tx_queue_id);
	if (ret == -1)
		return -1;

	RTE_LOG(DEBUG, PMD, "  TX TAP device name %s, qid %d on fd %d\n",
		internals->name, tx_queue_id, internals->txq[tx_queue_id].fd);

	return 0;
}
static int
tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int err = 0;

	err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
	if (!err)
		dev->data->mtu = mtu;

	return err;
}
static int
tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
		     struct ether_addr *mc_addr_set __rte_unused,
		     uint32_t nb_mc_addr __rte_unused)
{
	/*
	 * Nothing to do actually: the tap has no filtering whatsoever, every
	 * packet is received.
	 */
	return 0;
}
static int
tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifinfomsg *info = NLMSG_DATA(nh);

	if (nh->nlmsg_type != RTM_NEWLINK ||
	    (info->ifi_index != pmd->if_index &&
	     info->ifi_index != pmd->remote_if_index))
		return 0;
	return tap_link_update(dev, 0);
}
static void
tap_dev_intr_handler(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	struct pmd_internals *pmd = dev->data->dev_private;

	nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
}

static int
tap_intr_handle_set(struct rte_eth_dev *dev, int set)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	/* In any case, disable interrupt if the conf is no longer there. */
	if (!dev->data->dev_conf.intr_conf.lsc) {
		if (pmd->intr_handle.fd != -1)
			nl_final(pmd->intr_handle.fd);
		rte_intr_callback_unregister(
			&pmd->intr_handle, tap_dev_intr_handler, dev);
		return 0;
	}
	if (set) {
		pmd->intr_handle.fd = nl_init(RTMGRP_LINK);
		if (unlikely(pmd->intr_handle.fd == -1))
			return -EBADF;
		return rte_intr_callback_register(
			&pmd->intr_handle, tap_dev_intr_handler, dev);
	}
	nl_final(pmd->intr_handle.fd);
	return rte_intr_callback_unregister(&pmd->intr_handle,
					    tap_dev_intr_handler, dev);
}
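
/*
 * Link-state interrupts: instead of a hardware vector, this PMD listens
 * on an RTMGRP_LINK netlink socket registered as an external
 * (RTE_INTR_HANDLE_EXT) interrupt handle. RTM_NEWLINK messages for the
 * tap or remote ifindex are translated into tap_link_update() calls by
 * tap_nl_msg_handler() above.
 */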
static const uint32_t*
tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L2_ETHER_QINQ,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV4_EXT,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_SCTP,
	};

	return ptypes;
}

static int
tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
		  struct rte_eth_fc_conf *fc_conf)
{
	fc_conf->mode = RTE_FC_NONE;
	return 0;
}

static int
tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
		  struct rte_eth_fc_conf *fc_conf)
{
	if (fc_conf->mode != RTE_FC_NONE)
		return -ENOTSUP;
	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start              = tap_dev_start,
	.dev_stop               = tap_dev_stop,
	.dev_close              = tap_dev_close,
	.dev_configure          = tap_dev_configure,
	.dev_infos_get          = tap_dev_info,
	.rx_queue_setup         = tap_rx_queue_setup,
	.tx_queue_setup         = tap_tx_queue_setup,
	.rx_queue_release       = tap_rx_queue_release,
	.tx_queue_release       = tap_tx_queue_release,
	.flow_ctrl_get          = tap_flow_ctrl_get,
	.flow_ctrl_set          = tap_flow_ctrl_set,
	.link_update            = tap_link_update,
	.dev_set_link_up        = tap_link_set_up,
	.dev_set_link_down      = tap_link_set_down,
	.promiscuous_enable     = tap_promisc_enable,
	.promiscuous_disable    = tap_promisc_disable,
	.allmulticast_enable    = tap_allmulti_enable,
	.allmulticast_disable   = tap_allmulti_disable,
	.mac_addr_set           = tap_mac_set,
	.mtu_set                = tap_mtu_set,
	.set_mc_addr_list       = tap_set_mc_addr_list,
	.stats_get              = tap_stats_get,
	.stats_reset            = tap_stats_reset,
	.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
	.filter_ctrl            = tap_dev_filter_ctrl,
};
static int
tap_kernel_support(struct pmd_internals *pmd)
{
	struct utsname utsname;
	int ver[3];

	if (uname(&utsname) == -1 ||
	    sscanf(utsname.release, "%d.%d.%d",
		   &ver[0], &ver[1], &ver[2]) != 3)
		return 0;
	if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >= FLOWER_KERNEL_VERSION)
		pmd->flower_support = 1;
	if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >=
	    FLOWER_VLAN_KERNEL_VERSION)
		pmd->flower_vlan_support = 1;
	return 1;
}
static int
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
		   char *remote_iface, int fixed_mac_type)
{
	int numa_node = rte_socket_id();
	struct rte_eth_dev *dev;
	struct pmd_internals *pmd;
	struct rte_eth_dev_data *data;
	struct ifreq ifr;
	int i;

	RTE_LOG(DEBUG, PMD, "  TAP device on numa %u\n", rte_socket_id());

	data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node);
	if (!data) {
		RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n");
		goto error_exit;
	}

	dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
	if (!dev) {
		RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n");
		goto error_exit;
	}

	pmd = dev->data->dev_private;
	snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
	pmd->nb_queues = RTE_PMD_TAP_MAX_QUEUES;

	pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (pmd->ioctl_sock == -1) {
		RTE_LOG(ERR, PMD,
			"TAP Unable to get a socket for management: %s\n",
			strerror(errno));
		goto error_exit;
	}

	/* Setup some default values */
	rte_memcpy(data, dev->data, sizeof(*data));
	data->dev_private = pmd;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
	data->numa_node = numa_node;
	data->drv_name = pmd_tap_drv.driver.name;

	data->dev_link = pmd_link;
	data->mac_addrs = &pmd->eth_addr;
	data->nb_rx_queues = pmd->nb_queues;
	data->nb_tx_queues = pmd->nb_queues;

	dev->data = data;
	dev->dev_ops = &ops;
	dev->rx_pkt_burst = pmd_rx_burst;
	dev->tx_pkt_burst = pmd_tx_burst;

	pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
	pmd->intr_handle.fd = -1;

	/* Presetup the fds to -1 as being not valid */
	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		pmd->rxq[i].fd = -1;
		pmd->txq[i].fd = -1;
	}

	if (fixed_mac_type) {
		/* fixed mac = 00:64:74:61:70:<iface_idx> */
		static int iface_idx;
		char mac[ETHER_ADDR_LEN] = "\0dtap";

		mac[ETHER_ADDR_LEN - 1] = iface_idx++;
		rte_memcpy(&pmd->eth_addr, mac, ETHER_ADDR_LEN);
	} else {
		eth_random_addr((uint8_t *)&pmd->eth_addr);
	}

	/* Immediately create the netdevice (this will create the 1st queue). */
	if (tap_setup_queue(dev, pmd, 0) == -1)
		goto error_exit;

	ifr.ifr_mtu = dev->data->mtu;
	if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
		goto error_exit;

	memset(&ifr, 0, sizeof(struct ifreq));
	ifr.ifr_hwaddr.sa_family = AF_LOCAL;
	rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, ETHER_ADDR_LEN);
	if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
		goto error_exit;

	tap_kernel_support(pmd);
	if (!pmd->flower_support) {
		if (remote_iface[0]) {
			RTE_LOG(ERR, PMD,
				"%s: kernel does not support TC rules, required for remote feature.\n",
				pmd->name);
			goto error_exit;
		} else {
			RTE_LOG(WARNING, PMD,
				"%s: kernel too old for Flow API support.\n",
				pmd->name);
			return 0;
		}
	}
	/*
	 * Set up everything related to rte_flow:
	 * - netlink socket
	 * - tap / remote if_index
	 * - mandatory QDISCs
	 * - rte_flow actual/implicit lists
	 * - implicit rules
	 */
	pmd->nlsk_fd = nl_init(0);
	if (pmd->nlsk_fd == -1) {
		RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n",
			pmd->name);
		goto disable_rte_flow;
	}
	pmd->if_index = if_nametoindex(pmd->name);
	if (!pmd->if_index) {
		RTE_LOG(ERR, PMD, "%s: failed to get if_index.\n", pmd->name);
		goto disable_rte_flow;
	}
	if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
		RTE_LOG(ERR, PMD, "%s: failed to create multiq qdisc.\n",
			pmd->name);
		goto disable_rte_flow;
	}
	if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
		RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
			pmd->name);
		goto disable_rte_flow;
	}
	LIST_INIT(&pmd->flows);
	if (strlen(remote_iface)) {
		pmd->remote_if_index = if_nametoindex(remote_iface);
		if (!pmd->remote_if_index) {
			RTE_LOG(ERR, PMD, "%s: failed to get %s if_index.\n",
				pmd->name, remote_iface);
			goto error_remote;
		}
		snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
			 "%s", remote_iface);
		if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
			RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n",
				pmd->name, pmd->remote_iface);
			goto error_remote;
		}
		rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
			   ETHER_ADDR_LEN);
		/* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
		if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
			RTE_LOG(ERR, PMD, "%s: failed to set %s MAC address.\n",
				pmd->name, remote_iface);
			goto error_remote;
		}

		/*
		 * Flush usually returns negative value because it tries to
		 * delete every QDISC (and on a running device, one QDISC at
		 * least is needed). Ignore negative return value.
		 */
		qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
		if (qdisc_create_ingress(pmd->nlsk_fd,
					 pmd->remote_if_index) < 0) {
			RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
				pmd->name);
			goto error_remote;
		}
		LIST_INIT(&pmd->implicit_flows);
		if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
			RTE_LOG(ERR, PMD,
				"%s: failed to create implicit rules.\n",
				pmd->name);
			goto error_remote;
		}
	}

	return 0;
disable_rte_flow:
	RTE_LOG(ERR, PMD, " Disabling rte flow support: %s(%d)\n",
		strerror(errno), errno);
	if (strlen(remote_iface)) {
		RTE_LOG(ERR, PMD, "Remote feature requires flow support.\n");
		goto error_exit;
	}
	pmd->flower_support = 0;
	return 0;

error_remote:
	RTE_LOG(ERR, PMD, " Can't set up remote feature: %s(%d)\n",
		strerror(errno), errno);
	tap_flow_implicit_flush(pmd, NULL);

error_exit:
	RTE_LOG(ERR, PMD, "TAP Unable to initialize %s\n",
		rte_vdev_device_name(vdev));

	rte_free(data);
	return -EINVAL;
}
static int
set_interface_name(const char *key __rte_unused,
		   const char *value,
		   void *extra_args)
{
	char *name = (char *)extra_args;

	if (value)
		snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s", value);
	else
		snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
			 DEFAULT_TAP_NAME, (tap_unit - 1));

	return 0;
}

static int
set_interface_speed(const char *key __rte_unused,
		    const char *value,
		    void *extra_args)
{
	*(int *)extra_args = (value) ? atoi(value) : ETH_SPEED_NUM_10G;

	return 0;
}

static int
set_remote_iface(const char *key __rte_unused,
		 const char *value,
		 void *extra_args)
{
	char *name = (char *)extra_args;

	if (value)
		snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s", value);

	return 0;
}

static int
set_mac_type(const char *key __rte_unused,
	     const char *value,
	     void *extra_args)
{
	if (value &&
	    !strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED)))
		*(int *)extra_args = 1;
	return 0;
}
/* Open a TAP interface device.
 */
static int
rte_pmd_tap_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	int ret;
	struct rte_kvargs *kvlist = NULL;
	int speed;
	char tap_name[RTE_ETH_NAME_MAX_LEN];
	char remote_iface[RTE_ETH_NAME_MAX_LEN];
	int fixed_mac_type = 0;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	speed = ETH_SPEED_NUM_10G;
	snprintf(tap_name, sizeof(tap_name), "%s%d",
		 DEFAULT_TAP_NAME, tap_unit++);
	memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);

	if (params && (params[0] != '\0')) {
		RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);

		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist) {
			if (rte_kvargs_count(kvlist, ETH_TAP_SPEED_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_SPEED_ARG,
							 &set_interface_speed,
							 &speed);
				if (ret == -1)
					goto leave;
			}
			if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_IFACE_ARG,
							 &set_interface_name,
							 tap_name);
				if (ret == -1)
					goto leave;
			}
			if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_REMOTE_ARG,
							 &set_remote_iface,
							 remote_iface);
				if (ret == -1)
					goto leave;
			}
			if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_MAC_ARG,
							 &set_mac_type,
							 &fixed_mac_type);
				if (ret == -1)
					goto leave;
			}
		}
	}
	pmd_link.link_speed = speed;

	RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
		name, tap_name);

	ret = eth_dev_tap_create(dev, tap_name, remote_iface, fixed_mac_type);

leave:
	if (ret == -1) {
		RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n",
			name, tap_name);
		tap_unit--;	/* Restore the unit number */
	}
	rte_kvargs_free(kvlist);

	return ret;
}
/* detach a TAP device.
 */
static int
rte_pmd_tap_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals;
	int i;

	RTE_LOG(DEBUG, PMD, "Closing TUN/TAP Ethernet device on numa %u\n",
		rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (!eth_dev)
		return 0;

	internals = eth_dev->data->dev_private;
	if (internals->flower_support && internals->nlsk_fd) {
		tap_flow_flush(eth_dev, NULL);
		tap_flow_implicit_flush(internals, NULL);
		nl_final(internals->nlsk_fd);
	}
	for (i = 0; i < internals->nb_queues; i++)
		if (internals->rxq[i].fd != -1)
			close(internals->rxq[i].fd);

	close(internals->ioctl_sock);
	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_tap_drv = {
	.probe = rte_pmd_tap_probe,
	.remove = rte_pmd_tap_remove,
};

RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
			      ETH_TAP_IFACE_ARG "=<string> "
			      ETH_TAP_SPEED_ARG "=<int> "
			      ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_FIXED " "
			      ETH_TAP_REMOTE_ARG "=<string>");
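
/*
 * Usage sketch (illustrative; the application, core mask and interface
 * names below are placeholders, only the vdev parameters themselves come
 * from the registration above):
 *
 *   testpmd -l 0-1 --vdev=net_tap0,iface=foo0,speed=10000,mac=fixed,remote=eth1
 *
 * This would create a tap netdevice named "foo0", report a fixed 10G
 * link, use the fixed 00:64:74:61:70:<idx> MAC scheme and mirror the
 * existing "eth1" netdevice through the TC redirection rules set up in
 * eth_dev_tap_create().
 */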