/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_net.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/uio.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <linux/if_ether.h>
#include <linux/version.h>
#include <fcntl.h>

#include <rte_eth_tap.h>
#include <tap_flow.h>
#include <tap_netlink.h>
#include <tap_tcmsgs.h>
/* Linux based path to the TUN device */
#define TUN_TAP_DEV_PATH        "/dev/net/tun"
#define DEFAULT_TAP_NAME        "dtap"

#define ETH_TAP_IFACE_ARG       "iface"
#define ETH_TAP_SPEED_ARG       "speed"
#define ETH_TAP_REMOTE_ARG      "remote"
#define ETH_TAP_MAC_ARG         "mac"
#define ETH_TAP_MAC_FIXED       "fixed"

#define FLOWER_KERNEL_VERSION KERNEL_VERSION(4, 2, 0)
#define FLOWER_VLAN_KERNEL_VERSION KERNEL_VERSION(4, 9, 0)
static struct rte_vdev_driver pmd_tap_drv;

static const char *valid_arguments[] = {
	ETH_TAP_IFACE_ARG,
	ETH_TAP_SPEED_ARG,
	ETH_TAP_REMOTE_ARG,
	ETH_TAP_MAC_ARG,
	NULL
};

static int tap_unit;

static volatile uint32_t tap_trigger;	/* Rx trigger */

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG
};

static void
tap_trigger_cb(int sig __rte_unused)
{
	/* Valid trigger values are nonzero */
	tap_trigger = (tap_trigger + 1) | 0x80000000;
}
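
/*
 * Illustrative note (not part of the upstream code): the SIGIO handler above
 * only bumps a global counter. The Rx burst path further down compares that
 * counter against a per-queue copy and can skip the read syscall entirely
 * when no SIGIO arrived since the last poll, roughly:
 *
 *	uint32_t trigger = tap_trigger;
 *
 *	if (trigger == rxq->trigger_seen)
 *		return 0;	// fd is empty, no syscall needed
 *
 * The 0x80000000 bit keeps the value nonzero even if the counter wraps.
 */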
/* Specifies on what netdevices the ioctl should be applied */
enum ioctl_mode {
	LOCAL_AND_REMOTE,
	LOCAL_ONLY,
	REMOTE_ONLY,
};

static int
tap_ioctl(struct pmd_internals *pmd, unsigned long request,
	  struct ifreq *ifr, int set, enum ioctl_mode mode);

static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
/* Tun/Tap allocation routine
 *
 * name is the name of the interface to use, unless NULL to take the host
 * supplied name.
 */
static int
tun_alloc(struct pmd_internals *pmd, uint16_t qid)
{
	struct ifreq ifr;
#ifdef IFF_MULTI_QUEUE
	unsigned int features;
#endif
	int fd;

	memset(&ifr, 0, sizeof(struct ifreq));

	/*
	 * Do not set IFF_NO_PI as packet information header will be needed
	 * to check if a received packet has been truncated.
	 */
	ifr.ifr_flags = IFF_TAP;
	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);

	RTE_LOG(DEBUG, PMD, "ifr_name '%s'\n", ifr.ifr_name);

	fd = open(TUN_TAP_DEV_PATH, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, PMD, "Unable to create TAP interface\n");
		goto error;
	}

#ifdef IFF_MULTI_QUEUE
	/* Grab the TUN features to verify we can work multi-queue */
	if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
		RTE_LOG(ERR, PMD, "TAP unable to get TUN/TAP features\n");
		goto error;
	}
	RTE_LOG(DEBUG, PMD, "  TAP Features %08x\n", features);

	if (features & IFF_MULTI_QUEUE) {
		RTE_LOG(DEBUG, PMD, "  Multi-queue support for %d queues\n",
			RTE_PMD_TAP_MAX_QUEUES);
		ifr.ifr_flags |= IFF_MULTI_QUEUE;
	} else
#endif
	{
		ifr.ifr_flags |= IFF_ONE_QUEUE;
		RTE_LOG(DEBUG, PMD, "  Single queue only support\n");
	}

	/* Set the TUN/TAP configuration and set the name if needed */
	if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
		RTE_LOG(WARNING, PMD,
			"Unable to set TUNSETIFF for %s\n",
			ifr.ifr_name);
		perror("TUNSETIFF");
		goto error;
	}

	/* Always set the file descriptor to non-blocking */
	if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
		RTE_LOG(WARNING, PMD,
			"Unable to set %s to nonblocking\n",
			ifr.ifr_name);
		perror("F_SETFL, NONBLOCK");
		goto error;
	}

	/* Set up trigger to optimize empty Rx bursts */
	errno = 0;
	do {
		struct sigaction sa;
		int flags = fcntl(fd, F_GETFL);

		if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
			break;
		if (sa.sa_handler != tap_trigger_cb) {
			/*
			 * Make sure SIGIO is not already taken. This is done
			 * as late as possible to leave the application a
			 * chance to set up its own signal handler first.
			 */
			if (sa.sa_handler != SIG_IGN &&
			    sa.sa_handler != SIG_DFL) {
				errno = EBUSY;
				break;
			}
			sa = (struct sigaction){
				.sa_flags = SA_RESTART,
				.sa_handler = tap_trigger_cb,
			};
			if (sigaction(SIGIO, &sa, NULL) == -1)
				break;
		}
		/* Enable SIGIO on file descriptor */
		fcntl(fd, F_SETFL, flags | O_ASYNC);
		fcntl(fd, F_SETOWN, getpid());
	} while (0);

	if (errno) {
		/* Disable trigger globally in case of error */
		tap_trigger = 0;
		RTE_LOG(WARNING, PMD, "Rx trigger disabled: %s\n",
			strerror(errno));
	}

	if (qid == 0) {
		struct ifreq ifr;

		/*
		 * pmd->eth_addr contains the desired MAC, either from remote
		 * or from a random assignment. Sync it with the tap netdevice.
		 */
		ifr.ifr_hwaddr.sa_family = AF_LOCAL;
		rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
			   ETHER_ADDR_LEN);
		if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
			goto error;

		pmd->if_index = if_nametoindex(pmd->name);
		if (!pmd->if_index) {
			RTE_LOG(ERR, PMD,
				"Could not find ifindex for %s: rte_flow won't be usable.\n",
				pmd->name);
			return fd;
		}
		if (!pmd->flower_support)
			return fd;
		if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
			RTE_LOG(ERR, PMD,
				"Could not create multiq qdisc for %s: rte_flow won't be usable.\n",
				pmd->name);
			return fd;
		}
		if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
			RTE_LOG(ERR, PMD,
				"Could not create ingress qdisc for %s: rte_flow won't be usable.\n",
				pmd->name);
			return fd;
		}
		if (pmd->remote_if_index) {
			/*
			 * Flush usually returns negative value because it tries
			 * to delete every QDISC (and on a running device, one
			 * QDISC at least is needed). Ignore negative return
			 * values.
			 */
			qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
			if (qdisc_create_ingress(pmd->nlsk_fd,
						 pmd->remote_if_index) < 0)
				goto remote_fail;
			LIST_INIT(&pmd->implicit_flows);
			if (tap_flow_implicit_create(
				    pmd, TAP_REMOTE_LOCAL_MAC) < 0)
				goto remote_fail;
			if (tap_flow_implicit_create(
				    pmd, TAP_REMOTE_BROADCAST) < 0)
				goto remote_fail;
			if (tap_flow_implicit_create(
				    pmd, TAP_REMOTE_BROADCASTV6) < 0)
				goto remote_fail;
			if (tap_flow_implicit_create(
				    pmd, TAP_REMOTE_TX) < 0)
				goto remote_fail;
		}
	}

	return fd;

remote_fail:
	RTE_LOG(ERR, PMD,
		"Could not set up remote flow rules for %s: remote disabled.\n",
		pmd->name);
	pmd->remote_if_index = 0;
	tap_flow_implicit_flush(pmd, NULL);
	return fd;

error:
	if (fd > 0)
		close(fd);
	return -1;
}
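
/*
 * Minimal standalone sketch of the TUNSETIFF sequence used by tun_alloc()
 * above, with error handling elided; "dtap0" is a hypothetical name:
 *
 *	int fd = open("/dev/net/tun", O_RDWR);
 *	struct ifreq ifr = { .ifr_flags = IFF_TAP };
 *
 *	snprintf(ifr.ifr_name, IFNAMSIZ, "dtap0");
 *	if (fd < 0 || ioctl(fd, TUNSETIFF, (void *)&ifr) < 0)
 *		return -1;
 *	// fd now reads/writes frames (prefixed by struct tun_pi) on dtap0
 */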
/* Callback to handle the rx burst of packets to the correct interface and
 * file descriptor(s) in a multi-queue setup.
 */
static uint16_t
pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct rx_queue *rxq = queue;
	uint16_t num_rx;
	unsigned long num_rx_bytes = 0;
	uint32_t trigger = tap_trigger;

	if (trigger == rxq->trigger_seen)
		return 0;
	if (trigger)
		rxq->trigger_seen = trigger;
	rte_compiler_barrier();
	for (num_rx = 0; num_rx < nb_pkts; ) {
		struct rte_mbuf *mbuf = rxq->pool;
		struct rte_mbuf *seg = NULL;
		struct rte_mbuf *new_tail = NULL;
		uint16_t data_off = rte_pktmbuf_headroom(mbuf);
		int len;

		len = readv(rxq->fd, *rxq->iovecs,
			    1 + (rxq->rxmode->enable_scatter ?
				 rxq->nb_rx_desc : 1));
		if (len < (int)sizeof(struct tun_pi))
			break;

		/* Packet couldn't fit in the provided mbuf */
		if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
			rxq->stats.ierrors++;
			continue;
		}

		len -= sizeof(struct tun_pi);

		mbuf->pkt_len = len;
		mbuf->port = rxq->in_port;
		while (1) {
			struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(!buf)) {
				rxq->stats.rx_nombuf++;
				/* No new buf has been allocated: do nothing */
				if (!new_tail || !seg)
					goto end;

				seg->next = NULL;
				rte_pktmbuf_free(mbuf);

				goto end;
			}
			seg = seg ? seg->next : mbuf;
			if (rxq->pool == mbuf)
				rxq->pool = buf;
			if (new_tail)
				new_tail->next = buf;
			new_tail = buf;
			new_tail->next = seg->next;

			/* iovecs[0] is reserved for packet info (pi) */
			(*rxq->iovecs)[mbuf->nb_segs].iov_len =
				buf->buf_len - data_off;
			(*rxq->iovecs)[mbuf->nb_segs].iov_base =
				(char *)buf->buf_addr + data_off;

			seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
			seg->data_off = data_off;

			len -= seg->data_len;
			if (len <= 0)
				break;
			mbuf->nb_segs++;
			/* First segment has headroom, not the others */
			data_off = 0;
		}
		seg->next = NULL;
		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
						      RTE_PTYPE_ALL_MASK);

		/* account for the receive frame */
		bufs[num_rx++] = mbuf;
		num_rx_bytes += mbuf->pkt_len;
	}
end:
	rxq->stats.ipackets += num_rx;
	rxq->stats.ibytes += num_rx_bytes;

	return num_rx;
}
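
/*
 * Layout assumed by the readv() call in pmd_rx_burst() (a sketch, matching
 * how tap_rx_queue_setup() fills rxq->iovecs further down):
 *
 *	iovecs[0]           -> struct tun_pi (packet info header)
 *	iovecs[1..nb_desc]  -> data rooms of the pre-allocated mbufs in
 *	                       rxq->pool, the first one offset by headroom
 *
 * A single readv() therefore scatters one frame across as many mbuf
 * segments as needed when scattered Rx is enabled.
 */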
/* Callback to handle sending packets from the tap interface
 */
static uint16_t
pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct tx_queue *txq = queue;
	uint16_t num_tx = 0;
	unsigned long num_tx_bytes = 0;
	uint32_t max_size;
	int i;

	if (unlikely(nb_pkts == 0))
		return 0;

	max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = bufs[num_tx];
		struct iovec iovecs[mbuf->nb_segs + 1];
		struct tun_pi pi = { .flags = 0 };
		struct rte_mbuf *seg = mbuf;
		int n;
		int j;

		/* stats.errs will be incremented */
		if (rte_pktmbuf_pkt_len(mbuf) > max_size)
			break;

		iovecs[0].iov_base = &pi;
		iovecs[0].iov_len = sizeof(pi);
		for (j = 1; j <= mbuf->nb_segs; j++) {
			iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
			iovecs[j].iov_base =
				rte_pktmbuf_mtod(seg, void *);
			seg = seg->next;
		}
		/* copy the tx frame data */
		n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
		if (n <= 0)
			break;

		num_tx++;
		num_tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	txq->stats.opackets += num_tx;
	txq->stats.errs += nb_pkts - num_tx;
	txq->stats.obytes += num_tx_bytes;

	return num_tx;
}
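
/*
 * Tx mirrors the Rx layout: one writev() gathers the tun_pi header plus one
 * iovec per mbuf segment into a single frame, e.g. for a 2-segment mbuf
 * (illustrative):
 *
 *	iovecs[0] = { &pi,       sizeof(pi) }
 *	iovecs[1] = { seg0 data, seg0 len   }
 *	iovecs[2] = { seg1 data, seg1 len   }
 */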
static int
tap_ioctl(struct pmd_internals *pmd, unsigned long request,
	  struct ifreq *ifr, int set, enum ioctl_mode mode)
{
	short req_flags = ifr->ifr_flags;
	int remote = pmd->remote_if_index &&
		(mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);

	if (!pmd->remote_if_index && mode == REMOTE_ONLY)
		return 0;
	/*
	 * If there is a remote netdevice, apply ioctl on it, then apply it on
	 * the tap netdevice.
	 */
apply:
	if (remote)
		snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->remote_iface);
	else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
		snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->name);
	switch (request) {
	case SIOCSIFFLAGS:
		/* fetch current flags to leave other flags untouched */
		if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
			goto error;
		if (set)
			ifr->ifr_flags |= req_flags;
		else
			ifr->ifr_flags &= ~req_flags;
		break;
	case SIOCGIFFLAGS:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
	case SIOCSIFMTU:
		break;
	default:
		RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
			pmd->name);
		return -EINVAL;
	}
	if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
		goto error;
	if (remote-- && mode == LOCAL_AND_REMOTE)
		goto apply;
	return 0;

error:
	RTE_LOG(ERR, PMD, "%s: ioctl(%lu) failed with error: %s\n",
		ifr->ifr_name, request, strerror(errno));
	return -errno;
}
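
/*
 * Usage sketch: bringing both the tap netdevice and its remote up in one
 * call (this is how tap_link_set_up() below uses tap_ioctl()):
 *
 *	struct ifreq ifr = { .ifr_flags = IFF_UP };
 *
 *	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
 *
 * With set=1 the requested flags are OR'ed into the current ones; with
 * set=0 they are cleared, leaving all other flags untouched.
 */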
static int
tap_link_set_down(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_UP };

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
}

static int
tap_link_set_up(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_UP };

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
}

static int
tap_dev_start(struct rte_eth_dev *dev)
{
	int err;

	err = tap_intr_handle_set(dev, 1);
	if (err)
		return err;
	return tap_link_set_up(dev);
}

/* This function gets called when the current port gets stopped.
 */
static void
tap_dev_stop(struct rte_eth_dev *dev)
{
	tap_intr_handle_set(dev, 0);
	tap_link_set_down(dev);
}

static int
tap_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static uint32_t
tap_dev_speed_capa(void)
{
	uint32_t speed = pmd_link.link_speed;
	uint32_t capa = 0;

	if (speed >= ETH_SPEED_NUM_10M)
		capa |= ETH_LINK_SPEED_10M;
	if (speed >= ETH_SPEED_NUM_100M)
		capa |= ETH_LINK_SPEED_100M;
	if (speed >= ETH_SPEED_NUM_1G)
		capa |= ETH_LINK_SPEED_1G;
	if (speed >= ETH_SPEED_NUM_2_5G)
		capa |= ETH_LINK_SPEED_2_5G;
	if (speed >= ETH_SPEED_NUM_5G)
		capa |= ETH_LINK_SPEED_5G;
	if (speed >= ETH_SPEED_NUM_10G)
		capa |= ETH_LINK_SPEED_10G;
	if (speed >= ETH_SPEED_NUM_20G)
		capa |= ETH_LINK_SPEED_20G;
	if (speed >= ETH_SPEED_NUM_25G)
		capa |= ETH_LINK_SPEED_25G;
	if (speed >= ETH_SPEED_NUM_40G)
		capa |= ETH_LINK_SPEED_40G;
	if (speed >= ETH_SPEED_NUM_50G)
		capa |= ETH_LINK_SPEED_50G;
	if (speed >= ETH_SPEED_NUM_56G)
		capa |= ETH_LINK_SPEED_56G;
	if (speed >= ETH_SPEED_NUM_100G)
		capa |= ETH_LINK_SPEED_100G;

	return capa;
}
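
/*
 * Example (illustrative): with speed = ETH_SPEED_NUM_1G the function returns
 * ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G, i.e. every
 * fixed speed up to and including the configured one.
 */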
static void
tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
	dev_info->max_rx_queues = internals->nb_queues;
	dev_info->max_tx_queues = internals->nb_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
	dev_info->speed_capa = tap_dev_speed_capa();
}
static void
tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
{
	unsigned int i, imax;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
	unsigned long rx_nombuf = 0, ierrors = 0;
	const struct pmd_internals *pmd = dev->data->dev_private;

	imax = (pmd->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
		pmd->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;

	for (i = 0; i < imax; i++) {
		tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
		tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
		rx_total += tap_stats->q_ipackets[i];
		rx_bytes_total += tap_stats->q_ibytes[i];
		rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
		ierrors += pmd->rxq[i].stats.ierrors;

		tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
		tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
		tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
		tx_total += tap_stats->q_opackets[i];
		tx_err_total += tap_stats->q_errors[i];
		tx_bytes_total += tap_stats->q_obytes[i];
	}

	tap_stats->ipackets = rx_total;
	tap_stats->ibytes = rx_bytes_total;
	tap_stats->ierrors = ierrors;
	tap_stats->rx_nombuf = rx_nombuf;
	tap_stats->opackets = tx_total;
	tap_stats->oerrors = tx_err_total;
	tap_stats->obytes = tx_bytes_total;
}
static void
tap_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	struct pmd_internals *pmd = dev->data->dev_private;

	for (i = 0; i < pmd->nb_queues; i++) {
		pmd->rxq[i].stats.ipackets = 0;
		pmd->rxq[i].stats.ibytes = 0;
		pmd->rxq[i].stats.ierrors = 0;
		pmd->rxq[i].stats.rx_nombuf = 0;

		pmd->txq[i].stats.opackets = 0;
		pmd->txq[i].stats.errs = 0;
		pmd->txq[i].stats.obytes = 0;
	}
}
static void
tap_dev_close(struct rte_eth_dev *dev __rte_unused)
{
	int i;
	struct pmd_internals *internals = dev->data->dev_private;

	tap_link_set_down(dev);
	tap_flow_flush(dev, NULL);
	tap_flow_implicit_flush(internals, NULL);

	for (i = 0; i < internals->nb_queues; i++) {
		if (internals->rxq[i].fd != -1)
			close(internals->rxq[i].fd);
		internals->rxq[i].fd = -1;
		internals->txq[i].fd = -1;
	}
}
static void
tap_rx_queue_release(void *queue)
{
	struct rx_queue *rxq = queue;

	if (rxq && (rxq->fd > 0)) {
		close(rxq->fd);
		rxq->fd = -1;
		rte_pktmbuf_free(rxq->pool);
		rte_free(rxq->iovecs);
		rxq->pool = NULL;
		rxq->iovecs = NULL;
	}
}

static void
tap_tx_queue_release(void *queue)
{
	struct tx_queue *txq = queue;

	if (txq && (txq->fd > 0)) {
		close(txq->fd);
		txq->fd = -1;
	}
}
static int
tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = 0 };

	if (pmd->remote_if_index) {
		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
		if (!(ifr.ifr_flags & IFF_UP) ||
		    !(ifr.ifr_flags & IFF_RUNNING)) {
			dev_link->link_status = ETH_LINK_DOWN;
			return 0;
		}
	}
	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
	dev_link->link_status =
		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
		 ETH_LINK_UP :
		 ETH_LINK_DOWN);
	return 0;
}
static void
tap_promisc_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

	dev->data->promiscuous = 1;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index)
		tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_promisc_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

	dev->data->promiscuous = 0;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index)
		tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_allmulti_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

	dev->data->all_multicast = 1;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index)
		tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
}

static void
tap_allmulti_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

	dev->data->all_multicast = 0;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index)
		tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
}
static void
tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr;

	if (is_zero_ether_addr(mac_addr)) {
		RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
			dev->data->name);
		return;
	}
	/* Check the actual current MAC address on the tap netdevice */
	if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) != 0) {
		RTE_LOG(ERR, PMD,
			"%s: couldn't check current tap MAC address\n",
			dev->data->name);
		return;
	}
	if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
			       mac_addr))
		return;

	ifr.ifr_hwaddr.sa_family = AF_LOCAL;
	rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
	if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, LOCAL_AND_REMOTE) < 0)
		return;
	rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
	if (pmd->remote_if_index) {
		/* Replace MAC redirection rule after a MAC change */
		if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) {
			RTE_LOG(ERR, PMD,
				"%s: Couldn't delete MAC redirection rule\n",
				dev->data->name);
			return;
		}
		if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
			RTE_LOG(ERR, PMD,
				"%s: Couldn't add MAC redirection rule\n",
				dev->data->name);
	}
}
static int
tap_setup_queue(struct rte_eth_dev *dev,
		struct pmd_internals *internals,
		uint16_t qid)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct rx_queue *rx = &internals->rxq[qid];
	struct tx_queue *tx = &internals->txq[qid];
	int fd;

	fd = rx->fd;
	if (fd < 0) {
		fd = tx->fd;
		if (fd < 0) {
			RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n",
				pmd->name, qid);
			fd = tun_alloc(pmd, qid);
			if (fd < 0) {
				RTE_LOG(ERR, PMD, "tun_alloc(%s, %d) failed\n",
					pmd->name, qid);
				return -1;
			}
			if (qid == 0) {
				struct ifreq ifr;

				ifr.ifr_mtu = dev->data->mtu;
				if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1,
					      LOCAL_AND_REMOTE) < 0) {
					close(fd);
					return -1;
				}
			}
		}
	}

	rx->fd = fd;
	tx->fd = fd;
	tx->mtu = &dev->data->mtu;
	rx->rxmode = &dev->data->dev_conf.rxmode;

	return fd;
}

static int
rx_setup_queue(struct rte_eth_dev *dev,
	       struct pmd_internals *internals,
	       uint16_t qid)
{
	dev->data->rx_queues[qid] = &internals->rxq[qid];

	return tap_setup_queue(dev, internals, qid);
}

static int
tx_setup_queue(struct rte_eth_dev *dev,
	       struct pmd_internals *internals,
	       uint16_t qid)
{
	dev->data->tx_queues[qid] = &internals->txq[qid];

	return tap_setup_queue(dev, internals, qid);
}
static int
tap_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mp)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct rx_queue *rxq = &internals->rxq[rx_queue_id];
	struct rte_mbuf **tmp = &rxq->pool;
	long iov_max = sysconf(_SC_IOV_MAX);
	uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
	struct iovec (*iovecs)[nb_desc + 1];
	int data_off = RTE_PKTMBUF_HEADROOM;
	int ret = 0;
	int fd;
	int i;

	if ((rx_queue_id >= internals->nb_queues) || !mp) {
		RTE_LOG(WARNING, PMD,
			"nb_queues %d too small or mempool NULL\n",
			internals->nb_queues);
		return -1;
	}

	rxq->mp = mp;
	rxq->trigger_seen = 1; /* force initial burst */
	rxq->in_port = dev->data->port_id;
	rxq->nb_rx_desc = nb_desc;
	iovecs = rte_zmalloc_socket(dev->data->name, sizeof(*iovecs), 0,
				    socket_id);
	if (!iovecs) {
		RTE_LOG(WARNING, PMD,
			"%s: Couldn't allocate %d RX descriptors\n",
			dev->data->name, nb_desc);
		return -ENOMEM;
	}
	rxq->iovecs = iovecs;

	fd = rx_setup_queue(dev, internals, rx_queue_id);
	if (fd == -1) {
		ret = fd;
		goto error;
	}

	(*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
	(*rxq->iovecs)[0].iov_base = &rxq->pi;

	for (i = 1; i <= nb_desc; i++) {
		*tmp = rte_pktmbuf_alloc(rxq->mp);
		if (!*tmp) {
			RTE_LOG(WARNING, PMD,
				"%s: couldn't allocate memory for queue %d\n",
				dev->data->name, rx_queue_id);
			ret = -ENOMEM;
			goto error;
		}
		(*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
		(*rxq->iovecs)[i].iov_base =
			(char *)(*tmp)->buf_addr + data_off;
		data_off = 0;
		tmp = &(*tmp)->next;
	}

	RTE_LOG(DEBUG, PMD, "  RX TAP device name %s, qid %d on fd %d\n",
		internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);

	return 0;

error:
	rte_pktmbuf_free(rxq->pool);
	rxq->pool = NULL;
	rte_free(rxq->iovecs);
	rxq->iovecs = NULL;
	return ret;
}
static int
tap_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int ret;

	if (tx_queue_id >= internals->nb_queues)
		return -1;

	ret = tx_setup_queue(dev, internals, tx_queue_id);
	if (ret == -1)
		return -1;

	RTE_LOG(DEBUG, PMD, "  TX TAP device name %s, qid %d on fd %d\n",
		internals->name, tx_queue_id, internals->txq[tx_queue_id].fd);

	return 0;
}
static int
tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int err = 0;

	err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
	if (!err)
		dev->data->mtu = mtu;

	return err;
}
static int
tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
		     struct ether_addr *mc_addr_set __rte_unused,
		     uint32_t nb_mc_addr __rte_unused)
{
	/*
	 * Nothing to do actually: the tap has no filtering whatsoever, every
	 * packet is received.
	 */
	return 0;
}
static int
tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifinfomsg *info = NLMSG_DATA(nh);

	if (nh->nlmsg_type != RTM_NEWLINK ||
	    (info->ifi_index != pmd->if_index &&
	     info->ifi_index != pmd->remote_if_index))
		return 0;
	return tap_link_update(dev, 0);
}

static void
tap_dev_intr_handler(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	struct pmd_internals *pmd = dev->data->dev_private;

	nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
}
static int
tap_intr_handle_set(struct rte_eth_dev *dev, int set)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	/* In any case, disable interrupt if the conf is no longer there. */
	if (!dev->data->dev_conf.intr_conf.lsc) {
		if (pmd->intr_handle.fd != -1) {
			nl_final(pmd->intr_handle.fd);
			rte_intr_callback_unregister(
				&pmd->intr_handle, tap_dev_intr_handler, dev);
		}
		return 0;
	}
	if (set) {
		pmd->intr_handle.fd = nl_init(RTMGRP_LINK);
		if (unlikely(pmd->intr_handle.fd == -1))
			return -EBADF;
		return rte_intr_callback_register(
			&pmd->intr_handle, tap_dev_intr_handler, dev);
	}
	nl_final(pmd->intr_handle.fd);
	return rte_intr_callback_unregister(&pmd->intr_handle,
					    tap_dev_intr_handler, dev);
}
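
/*
 * Note (illustrative): link state change interrupts are only armed when the
 * application requested them at configure time, e.g.:
 *
 *	struct rte_eth_conf conf = {
 *		.intr_conf = { .lsc = 1 },
 *	};
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *
 * Otherwise tap_intr_handle_set() tears the netlink socket down.
 */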
static const uint32_t*
tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L2_ETHER_QINQ,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV4_EXT,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_SCTP,
	};

	return ptypes;
}
static int
tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
		  struct rte_eth_fc_conf *fc_conf)
{
	fc_conf->mode = RTE_FC_NONE;
	return 0;
}

static int
tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
		  struct rte_eth_fc_conf *fc_conf)
{
	if (fc_conf->mode != RTE_FC_NONE)
		return -ENOTSUP;
	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start              = tap_dev_start,
	.dev_stop               = tap_dev_stop,
	.dev_close              = tap_dev_close,
	.dev_configure          = tap_dev_configure,
	.dev_infos_get          = tap_dev_info,
	.rx_queue_setup         = tap_rx_queue_setup,
	.tx_queue_setup         = tap_tx_queue_setup,
	.rx_queue_release       = tap_rx_queue_release,
	.tx_queue_release       = tap_tx_queue_release,
	.flow_ctrl_get          = tap_flow_ctrl_get,
	.flow_ctrl_set          = tap_flow_ctrl_set,
	.link_update            = tap_link_update,
	.dev_set_link_up        = tap_link_set_up,
	.dev_set_link_down      = tap_link_set_down,
	.promiscuous_enable     = tap_promisc_enable,
	.promiscuous_disable    = tap_promisc_disable,
	.allmulticast_enable    = tap_allmulti_enable,
	.allmulticast_disable   = tap_allmulti_disable,
	.mac_addr_set           = tap_mac_set,
	.mtu_set                = tap_mtu_set,
	.set_mc_addr_list       = tap_set_mc_addr_list,
	.stats_get              = tap_stats_get,
	.stats_reset            = tap_stats_reset,
	.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
	.filter_ctrl            = tap_dev_filter_ctrl,
};
static void
tap_kernel_support(struct pmd_internals *pmd)
{
	struct utsname utsname;
	int ver[3];

	if (uname(&utsname) == -1 ||
	    sscanf(utsname.release, "%d.%d.%d",
		   &ver[0], &ver[1], &ver[2]) != 3)
		return;
	if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >= FLOWER_KERNEL_VERSION)
		pmd->flower_support = 1;
	if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >=
	    FLOWER_VLAN_KERNEL_VERSION)
		pmd->flower_vlan_support = 1;
}
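
/*
 * Example (illustrative): on a 4.4 kernel, KERNEL_VERSION(4, 4, 0) is >=
 * FLOWER_KERNEL_VERSION (4.2) but < FLOWER_VLAN_KERNEL_VERSION (4.9), so
 * flower_support = 1 and flower_vlan_support = 0: rte_flow works, but
 * without VLAN pattern items.
 */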
static int
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
		   char *remote_iface, int fixed_mac_type)
{
	int numa_node = rte_socket_id();
	struct rte_eth_dev *dev;
	struct pmd_internals *pmd;
	struct rte_eth_dev_data *data;
	int i;

	RTE_LOG(DEBUG, PMD, "  TAP device on numa %u\n", rte_socket_id());

	data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node);
	if (!data) {
		RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n");
		goto error_exit;
	}

	dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
	if (!dev) {
		RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n");
		goto error_exit;
	}

	pmd = dev->data->dev_private;
	snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
	pmd->nb_queues = RTE_PMD_TAP_MAX_QUEUES;

	pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (pmd->ioctl_sock == -1) {
		RTE_LOG(ERR, PMD,
			"TAP Unable to get a socket for management: %s\n",
			strerror(errno));
		goto error_exit;
	}

	/* Setup some default values */
	rte_memcpy(data, dev->data, sizeof(*data));
	data->dev_private = pmd;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
	data->numa_node = numa_node;
	data->drv_name = pmd_tap_drv.driver.name;

	data->dev_link = pmd_link;
	data->mac_addrs = &pmd->eth_addr;
	data->nb_rx_queues = pmd->nb_queues;
	data->nb_tx_queues = pmd->nb_queues;

	dev->data = data;
	dev->dev_ops = &ops;
	dev->rx_pkt_burst = pmd_rx_burst;
	dev->tx_pkt_burst = pmd_tx_burst;

	pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
	pmd->intr_handle.fd = -1;

	/* Presetup the fds to -1 as being not valid */
	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		pmd->rxq[i].fd = -1;
		pmd->txq[i].fd = -1;
	}

	if (fixed_mac_type) {
		/* fixed mac = 00:64:74:61:70:<iface_idx> */
		static int iface_idx;
		char mac[ETHER_ADDR_LEN] = "\0dtap";

		mac[ETHER_ADDR_LEN - 1] = iface_idx++;
		rte_memcpy(&pmd->eth_addr, mac, ETHER_ADDR_LEN);
	} else {
		eth_random_addr((uint8_t *)&pmd->eth_addr);
	}

	tap_kernel_support(pmd);
	if (!pmd->flower_support)
		return 0;
	LIST_INIT(&pmd->flows);
	/*
	 * If no netlink socket can be created, then it will fail when
	 * creating/destroying flow rules.
	 */
	pmd->nlsk_fd = nl_init(0);
	if (strlen(remote_iface)) {
		struct ifreq ifr;

		pmd->remote_if_index = if_nametoindex(remote_iface);
		snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
			 "%s", remote_iface);
		if (!pmd->remote_if_index) {
			RTE_LOG(ERR, PMD, "Could not find %s ifindex: "
				"remote interface will remain unconfigured\n",
				remote_iface);
			return 0;
		}
		if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
			RTE_LOG(ERR, PMD, "Could not get remote MAC address\n");
			goto error_exit;
		}
		rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
			   ETHER_ADDR_LEN);
	}

	return 0;

error_exit:
	RTE_LOG(DEBUG, PMD, "TAP Unable to initialize %s\n",
		rte_vdev_device_name(vdev));

	rte_free(data);
	return -EINVAL;
}
static int
set_interface_name(const char *key __rte_unused,
		   const char *value,
		   void *extra_args)
{
	char *name = (char *)extra_args;

	if (value)
		snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s", value);
	else
		snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
			 DEFAULT_TAP_NAME, (tap_unit - 1));

	return 0;
}

static int
set_interface_speed(const char *key __rte_unused,
		    const char *value,
		    void *extra_args)
{
	*(int *)extra_args = (value) ? atoi(value) : ETH_SPEED_NUM_10G;

	return 0;
}

static int
set_remote_iface(const char *key __rte_unused,
		 const char *value,
		 void *extra_args)
{
	char *name = (char *)extra_args;

	if (value)
		snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s", value);

	return 0;
}

static int
set_mac_type(const char *key __rte_unused,
	     const char *value,
	     void *extra_args)
{
	if (value &&
	    !strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED)))
		*(int *)extra_args = 1;

	return 0;
}
/* Open a TAP interface device.
 */
static int
rte_pmd_tap_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	int ret;
	struct rte_kvargs *kvlist = NULL;
	int speed;
	char tap_name[RTE_ETH_NAME_MAX_LEN];
	char remote_iface[RTE_ETH_NAME_MAX_LEN];
	int fixed_mac_type = 0;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	speed = ETH_SPEED_NUM_10G;
	snprintf(tap_name, sizeof(tap_name), "%s%d",
		 DEFAULT_TAP_NAME, tap_unit++);
	memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);

	if (params && (params[0] != '\0')) {
		RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);

		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist) {
			if (rte_kvargs_count(kvlist, ETH_TAP_SPEED_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_SPEED_ARG,
							 &set_interface_speed,
							 &speed);
				if (ret == -1)
					goto leave;
			}

			if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_IFACE_ARG,
							 &set_interface_name,
							 tap_name);
				if (ret == -1)
					goto leave;
			}

			if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_REMOTE_ARG,
							 &set_remote_iface,
							 remote_iface);
				if (ret == -1)
					goto leave;
			}

			if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_MAC_ARG,
							 &set_mac_type,
							 &fixed_mac_type);
				if (ret == -1)
					goto leave;
			}
		}
	}
	pmd_link.link_speed = speed;

	RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
		name, tap_name);

	ret = eth_dev_tap_create(dev, tap_name, remote_iface, fixed_mac_type);

leave:
	if (ret == -1) {
		RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n",
			name, tap_name);
		tap_unit--;	/* Restore the unit number */
	}
	rte_kvargs_free(kvlist);

	return ret;
}
/* detach a TAP device.
 */
static int
rte_pmd_tap_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals;
	int i;

	RTE_LOG(DEBUG, PMD, "Closing TUN/TAP Ethernet device on numa %u\n",
		rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (!eth_dev)
		return 0;

	internals = eth_dev->data->dev_private;
	if (internals->flower_support && internals->nlsk_fd) {
		tap_flow_flush(eth_dev, NULL);
		tap_flow_implicit_flush(internals, NULL);
		nl_final(internals->nlsk_fd);
	}
	for (i = 0; i < internals->nb_queues; i++)
		if (internals->rxq[i].fd != -1)
			close(internals->rxq[i].fd);

	close(internals->ioctl_sock);
	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_tap_drv = {
	.probe = rte_pmd_tap_probe,
	.remove = rte_pmd_tap_remove,
};

RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
			      ETH_TAP_IFACE_ARG "=<string> "
			      ETH_TAP_SPEED_ARG "=<int> "
			      ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_FIXED " "
			      ETH_TAP_REMOTE_ARG "=<string>");
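
/*
 * Usage sketch (assuming a DPDK application such as testpmd): all four
 * parameters registered above are optional, e.g.:
 *
 *	testpmd --vdev=net_tap0,iface=dtap0,speed=10000,remote=eth0,mac=fixed
 *
 * iface: name of the tap netdevice to create; speed: reported link speed in
 * Mbps; remote: existing netdevice to redirect traffic from; mac=fixed:
 * derive the MAC from the 00:64:74:61:70:<idx> ("\0dtap") pattern instead of
 * a random address.
 */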