/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_net.h>
#include <rte_debug.h>
#include <rte_ip.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/uio.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <linux/if_ether.h>

#include <rte_eth_tap.h>
#include <tap_flow.h>
#include <tap_netlink.h>
#include <tap_tcmsgs.h>
/* Linux based path to the TUN device */
#define TUN_TAP_DEV_PATH "/dev/net/tun"
#define DEFAULT_TAP_NAME "dtap"

#define ETH_TAP_IFACE_ARG "iface"
#define ETH_TAP_REMOTE_ARG "remote"
#define ETH_TAP_MAC_ARG "mac"
#define ETH_TAP_MAC_FIXED "fixed"

#define ETH_TAP_USR_MAC_FMT "xx:xx:xx:xx:xx:xx"
#define ETH_TAP_CMP_MAC_FMT "0123456789ABCDEFabcdef"
#define ETH_TAP_MAC_ARG_FMT ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT
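
/*
 * Illustrative kvargs strings accepted by this PMD (the iface, remote and
 * mac keys are the ones declared in valid_arguments below):
 *   net_tap0,iface=dtap0
 *   net_tap0,iface=dtap0,remote=eth0,mac=00:64:74:61:70:30
 *   net_tap0,mac=fixed
 */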
static struct rte_vdev_driver pmd_tap_drv;

static const char *valid_arguments[] = {
	ETH_TAP_IFACE_ARG,
	ETH_TAP_REMOTE_ARG,
	ETH_TAP_MAC_ARG,
	NULL
};

static int tap_unit;	/* suffix for the next default "dtapN" name */

static volatile uint32_t tap_trigger;	/* Rx trigger */
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_AUTONEG
};
static void
tap_trigger_cb(int sig __rte_unused)
{
	/* Valid trigger values are nonzero */
	tap_trigger = (tap_trigger + 1) | 0x80000000;
}
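
/*
 * Note: OR-ing in 0x80000000 keeps the stored trigger nonzero even when the
 * low bits wrap around, since zero is reserved to mean "trigger disabled or
 * not fired yet" (see pmd_rx_burst()).
 */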
/* Specifies on what netdevices the ioctl should be applied */
enum ioctl_mode {
	LOCAL_AND_REMOTE,
	LOCAL_ONLY,
	REMOTE_ONLY,
};

static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
/* Tun/Tap allocation routine
 *
 * name is the name of the interface to use, unless NULL to take the host
 * supplied name.
 */
static int
tun_alloc(struct pmd_internals *pmd)
{
	struct ifreq ifr;
#ifdef IFF_MULTI_QUEUE
	unsigned int features;
#endif
	int fd;

	memset(&ifr, 0, sizeof(struct ifreq));

	/*
	 * Do not set IFF_NO_PI as the packet information header will be
	 * needed to check if a received packet has been truncated.
	 */
	ifr.ifr_flags = IFF_TAP;
	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);

	RTE_LOG(DEBUG, PMD, "ifr_name '%s'\n", ifr.ifr_name);

	fd = open(TUN_TAP_DEV_PATH, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, PMD, "Unable to create TAP interface\n");
		goto error;
	}
#ifdef IFF_MULTI_QUEUE
	/* Grab the TUN features to verify we can work multi-queue */
	if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
		RTE_LOG(ERR, PMD, "TAP unable to get TUN/TAP features\n");
		goto error;
	}
	RTE_LOG(DEBUG, PMD, "  TAP Features %08x\n", features);

	if (features & IFF_MULTI_QUEUE) {
		RTE_LOG(DEBUG, PMD, "  Multi-queue support for %d queues\n",
			RTE_PMD_TAP_MAX_QUEUES);
		ifr.ifr_flags |= IFF_MULTI_QUEUE;
	} else
#endif
	{
		ifr.ifr_flags |= IFF_ONE_QUEUE;
		RTE_LOG(DEBUG, PMD, "  Single queue support only\n");
	}
	/* Set the TUN/TAP configuration and set the name if needed */
	if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
		RTE_LOG(WARNING, PMD,
			"Unable to set TUNSETIFF for %s\n",
			ifr.ifr_name);
		perror("TUNSETIFF");
		goto error;
	}

	/* Always set the file descriptor to non-blocking */
	if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
		RTE_LOG(WARNING, PMD,
			"Unable to set %s to nonblocking\n",
			ifr.ifr_name);
		perror("F_SETFL, NONBLOCK");
		goto error;
	}
	/* Set up trigger to optimize empty Rx bursts */
	errno = 0;
	do {
		struct sigaction sa;
		int flags = fcntl(fd, F_GETFL);

		if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
			break;
		if (sa.sa_handler != tap_trigger_cb) {
			/*
			 * Make sure SIGIO is not already taken. This is done
			 * as late as possible to leave the application a
			 * chance to set up its own signal handler first.
			 */
			if (sa.sa_handler != SIG_IGN &&
			    sa.sa_handler != SIG_DFL) {
				errno = EBUSY;
				break;
			}
			sa = (struct sigaction){
				.sa_flags = SA_RESTART,
				.sa_handler = tap_trigger_cb,
			};
			if (sigaction(SIGIO, &sa, NULL) == -1)
				break;
		}
		/* Enable SIGIO on file descriptor */
		fcntl(fd, F_SETFL, flags | O_ASYNC);
		fcntl(fd, F_SETOWN, getpid());
	} while (0);
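
	/*
	 * From this point the kernel raises SIGIO whenever the fd becomes
	 * readable; tap_trigger_cb() then bumps tap_trigger so idle Rx
	 * queues know polling is worthwhile again (see pmd_rx_burst()).
	 */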
	if (errno) {
		/* Disable trigger globally in case of error */
		tap_trigger = 0;
		RTE_LOG(WARNING, PMD, "Rx trigger disabled: %s\n",
			strerror(errno));
	}

	return fd;

error:
	if (fd > 0)
		close(fd);
	return -1;
}
static void
tap_verify_csum(struct rte_mbuf *mbuf)
{
	uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
	uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
	uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
	unsigned int l2_len = sizeof(struct ether_hdr);
	unsigned int l3_len;
	void *l3_hdr;
	void *l4_hdr;
	uint16_t cksum = 0;

	if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
		l2_len += 4;
	else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
		l2_len += 8;
	/* Don't verify checksum for packets with discontinuous L2 header */
	if (unlikely(l2_len + sizeof(struct ipv4_hdr) >
		     rte_pktmbuf_data_len(mbuf)))
		return;
	l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
	if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
		struct ipv4_hdr *iph = l3_hdr;

		/* ihl contains the number of 4-byte words in the header */
		l3_len = 4 * (iph->version_ihl & 0xf);
		if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
			return;

		cksum = ~rte_raw_cksum(iph, l3_len);
		mbuf->ol_flags |= cksum ?
			PKT_RX_IP_CKSUM_BAD :
			PKT_RX_IP_CKSUM_GOOD;
	} else if (l3 == RTE_PTYPE_L3_IPV6) {
		l3_len = sizeof(struct ipv6_hdr);
	} else {
		/* IPv6 extensions are not supported */
		return;
	}
	if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
		l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len);
		/* Don't verify checksum for multi-segment packets. */
		if (mbuf->nb_segs > 1)
			return;
		if (l3 == RTE_PTYPE_L3_IPV4)
			cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
		else if (l3 == RTE_PTYPE_L3_IPV6)
			cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
		mbuf->ol_flags |= cksum ?
			PKT_RX_L4_CKSUM_BAD :
			PKT_RX_L4_CKSUM_GOOD;
	}
}
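
/*
 * Note on tap_verify_csum(): summing a region that already embeds a correct
 * Internet checksum yields 0xffff, so the one's complement of
 * rte_raw_cksum() / rte_ipv[46]_udptcp_cksum() above is zero exactly when
 * the received checksum is good.
 */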
static uint64_t
tap_rx_offload_get_port_capa(void)
{
	/*
	 * To support legacy apps, report the queue capabilities as port
	 * capabilities as well.
	 */
	return DEV_RX_OFFLOAD_SCATTER |
	       DEV_RX_OFFLOAD_IPV4_CKSUM |
	       DEV_RX_OFFLOAD_UDP_CKSUM |
	       DEV_RX_OFFLOAD_TCP_CKSUM |
	       DEV_RX_OFFLOAD_CRC_STRIP;
}
static uint64_t
tap_rx_offload_get_queue_capa(void)
{
	return DEV_RX_OFFLOAD_SCATTER |
	       DEV_RX_OFFLOAD_IPV4_CKSUM |
	       DEV_RX_OFFLOAD_UDP_CKSUM |
	       DEV_RX_OFFLOAD_TCP_CKSUM |
	       DEV_RX_OFFLOAD_CRC_STRIP;
}
static bool
tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
	uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
	uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();

	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	    offloads)
		return false;
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}
/* Callback to handle the rx burst of packets to the correct interface and
 * file descriptor(s) in a multi-queue setup.
 */
static uint16_t
pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct rx_queue *rxq = queue;
	uint16_t num_rx;
	unsigned long num_rx_bytes = 0;
	uint32_t trigger = tap_trigger;

	if (trigger == rxq->trigger_seen)
		return 0;
	if (trigger)
		rxq->trigger_seen = trigger;
	rte_compiler_barrier();
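	/*
	 * Rx trigger rationale: tap_trigger is only bumped from the SIGIO
	 * handler installed in tun_alloc(). Returning early while it still
	 * equals trigger_seen spares a readv() system call on an idle queue;
	 * the compiler barrier keeps the trigger_seen update ordered before
	 * the packet reads below.
	 */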
	for (num_rx = 0; num_rx < nb_pkts; ) {
		struct rte_mbuf *mbuf = rxq->pool;
		struct rte_mbuf *seg = NULL;
		struct rte_mbuf *new_tail = NULL;
		uint16_t data_off = rte_pktmbuf_headroom(mbuf);
		int len;

		len = readv(rxq->fd, *rxq->iovecs,
			    1 +
			    (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
			     rxq->nb_rx_desc : 1));
		if (len < (int)sizeof(struct tun_pi))
			break;

		/* Packet couldn't fit in the provided mbuf */
		if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
			rxq->stats.ierrors++;
			continue;
		}

		len -= sizeof(struct tun_pi);

		mbuf->pkt_len = len;
		mbuf->port = rxq->in_port;
		while (1) {
			struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(!buf)) {
				rxq->stats.rx_nombuf++;
				/* No new buf has been allocated: do nothing */
				if (!new_tail || !seg)
					goto end;

				seg->next = NULL;
				rte_pktmbuf_free(mbuf);

				goto end;
			}
			seg = seg ? seg->next : mbuf;
			if (rxq->pool == mbuf)
				rxq->pool = buf;
			if (new_tail)
				new_tail->next = buf;
			new_tail = buf;
			new_tail->next = seg->next;

			/* iovecs[0] is reserved for packet info (pi) */
			(*rxq->iovecs)[mbuf->nb_segs].iov_len =
				buf->buf_len - data_off;
			(*rxq->iovecs)[mbuf->nb_segs].iov_base =
				(char *)buf->buf_addr + data_off;

			seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
			seg->data_off = data_off;

			len -= seg->data_len;
			if (len <= 0)
				break;
			mbuf->nb_segs++;
			/* First segment has headroom, not the others */
			data_off = 0;
		}
		seg->next = NULL;
		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
						      RTE_PTYPE_ALL_MASK);
		if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
			tap_verify_csum(mbuf);

		/* account for the receive frame */
		bufs[num_rx++] = mbuf;
		num_rx_bytes += mbuf->pkt_len;
	}
end:
	rxq->stats.ipackets += num_rx;
	rxq->stats.ibytes += num_rx_bytes;

	return num_rx;
}
static uint64_t
tap_tx_offload_get_port_capa(void)
{
	/*
	 * To support legacy apps, report the queue capabilities as port
	 * capabilities as well.
	 */
	return DEV_TX_OFFLOAD_MULTI_SEGS |
	       DEV_TX_OFFLOAD_IPV4_CKSUM |
	       DEV_TX_OFFLOAD_UDP_CKSUM |
	       DEV_TX_OFFLOAD_TCP_CKSUM;
}

static uint64_t
tap_tx_offload_get_queue_capa(void)
{
	return DEV_TX_OFFLOAD_MULTI_SEGS |
	       DEV_TX_OFFLOAD_IPV4_CKSUM |
	       DEV_TX_OFFLOAD_UDP_CKSUM |
	       DEV_TX_OFFLOAD_TCP_CKSUM;
}
static bool
tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
{
	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
	uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
	uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();

	if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
	    offloads)
		return false;
	/* Verify we have no conflict with port offloads */
	if ((port_offloads ^ offloads) & port_supp_offloads)
		return false;
	return true;
}
static void
tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
	       unsigned int l3_len)
{
	void *l3_hdr = packet + l2_len;

	if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
		struct ipv4_hdr *iph = l3_hdr;
		uint16_t cksum;

		iph->hdr_checksum = 0;
		cksum = rte_raw_cksum(iph, l3_len);
		iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
	}
	if (ol_flags & PKT_TX_L4_MASK) {
		uint16_t l4_len;
		uint32_t cksum;
		uint16_t *l4_cksum;
		void *l4_hdr;

		l4_hdr = packet + l2_len + l3_len;
		if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
			l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
		else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
			l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
		else
			return;
		*l4_cksum = 0;
		if (ol_flags & PKT_TX_IPV4) {
			struct ipv4_hdr *iph = l3_hdr;

			l4_len = rte_be_to_cpu_16(iph->total_length) - l3_len;
			cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
		} else {
			struct ipv6_hdr *ip6h = l3_hdr;

			/* payload_len does not include ext headers */
			l4_len = rte_be_to_cpu_16(ip6h->payload_len) -
				l3_len + sizeof(struct ipv6_hdr);
			cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
		}
		cksum += rte_raw_cksum(l4_hdr, l4_len);
		cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
		cksum = (~cksum) & 0xffff;
		if (cksum == 0)
			cksum = 0xffff;
		*l4_cksum = cksum;
	}
}
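
/*
 * The folding at the end of tap_tx_offload() is standard RFC 1071
 * finalization: carries in the 32-bit partial sum are folded back into 16
 * bits before complementing, and a result of 0 is sent as 0xffff since a
 * zero UDP checksum means "no checksum computed".
 */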
/* Callback to handle sending packets from the tap interface
 */
static uint16_t
pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct tx_queue *txq = queue;
	uint16_t num_tx = 0;
	unsigned long num_tx_bytes = 0;
	uint32_t max_size;
	int i;

	if (unlikely(nb_pkts == 0))
		return 0;

	max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
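	/*
	 * max_size above is the MTU plus the L2 header, the CRC and 4 more
	 * bytes of room for an optional VLAN tag.
	 */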
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = bufs[num_tx];
		struct iovec iovecs[mbuf->nb_segs + 1];
		struct tun_pi pi = { .flags = 0 };
		struct rte_mbuf *seg = mbuf;
		char m_copy[mbuf->data_len];
		int n;
		int j;

		/* stats.errs will be incremented */
		if (rte_pktmbuf_pkt_len(mbuf) > max_size)
			break;

		iovecs[0].iov_base = &pi;
		iovecs[0].iov_len = sizeof(pi);
		for (j = 1; j <= mbuf->nb_segs; j++) {
			iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
			iovecs[j].iov_base =
				rte_pktmbuf_mtod(seg, void *);
			seg = seg->next;
		}
		if (txq->csum &&
		    ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
		     (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
		     (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
			/* Support only packets with all data in the same seg */
			if (mbuf->nb_segs > 1)
				break;
			/* To change checksums, work on a copy of data. */
			rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
				   rte_pktmbuf_data_len(mbuf));
			tap_tx_offload(m_copy, mbuf->ol_flags,
				       mbuf->l2_len, mbuf->l3_len);
			iovecs[1].iov_base = m_copy;
		}
		/* copy the tx frame data */
		n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
		if (n <= 0)
			break;

		num_tx++;
		num_tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	txq->stats.opackets += num_tx;
	txq->stats.errs += nb_pkts - num_tx;
	txq->stats.obytes += num_tx_bytes;

	return num_tx;
}
static const char *
tap_ioctl_req2str(unsigned long request)
{
	switch (request) {
	case SIOCSIFFLAGS:
		return "SIOCSIFFLAGS";
	case SIOCGIFFLAGS:
		return "SIOCGIFFLAGS";
	case SIOCGIFHWADDR:
		return "SIOCGIFHWADDR";
	case SIOCSIFHWADDR:
		return "SIOCSIFHWADDR";
	case SIOCSIFMTU:
		return "SIOCSIFMTU";
	}
	return "UNKNOWN";
}
static int
tap_ioctl(struct pmd_internals *pmd, unsigned long request,
	  struct ifreq *ifr, int set, enum ioctl_mode mode)
{
	short req_flags = ifr->ifr_flags;
	int remote = pmd->remote_if_index &&
		     (mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);

	if (!pmd->remote_if_index && mode == REMOTE_ONLY)
		return 0;
	/*
	 * If there is a remote netdevice, apply ioctl on it, then apply it on
	 * the tap netdevice.
	 */
apply:
	if (remote)
		snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->remote_iface);
	else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
		snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->name);
	switch (request) {
	case SIOCSIFFLAGS:
		/* fetch current flags to leave other flags untouched */
		if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
			goto error;
		if (set)
			ifr->ifr_flags |= req_flags;
		else
			ifr->ifr_flags &= ~req_flags;
		break;
	case SIOCGIFFLAGS:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
	case SIOCSIFMTU:
		break;
	default:
		RTE_ASSERT(!"unsupported request type: must not happen");
	}
	if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
		goto error;
	if (remote-- && mode == LOCAL_AND_REMOTE)
		goto apply;
	return 0;
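
	/*
	 * The remote-- above makes the ioctl run twice in LOCAL_AND_REMOTE
	 * mode: first on the remote netdevice (remote == 1), then on the tap
	 * netdevice itself once remote drops to 0.
	 */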
error:
	RTE_LOG(DEBUG, PMD, "%s: %s(%s) failed: %s(%d)\n", ifr->ifr_name,
		__func__, tap_ioctl_req2str(request), strerror(errno), errno);
	return -errno;
}
static int
tap_link_set_down(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_UP };

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
}

static int
tap_link_set_up(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_UP };

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
}
static int
tap_dev_start(struct rte_eth_dev *dev)
{
	int err;

	err = tap_intr_handle_set(dev, 1);
	if (err)
		return err;
	return tap_link_set_up(dev);
}

/* This function gets called when the current port gets stopped. */
static void
tap_dev_stop(struct rte_eth_dev *dev)
{
	tap_intr_handle_set(dev, 0);
	tap_link_set_down(dev);
}
static int
tap_dev_configure(struct rte_eth_dev *dev)
{
	uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa();
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;

	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
		rte_errno = ENOTSUP;
		RTE_LOG(ERR, PMD,
			"Some Tx offloads are not supported, "
			"requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
			tx_offloads, supp_tx_offloads);
		return -rte_errno;
	}
	if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
		RTE_LOG(ERR, PMD,
			"%s: number of rx queues %d exceeds max num of queues %d\n",
			dev->device->name,
			dev->data->nb_rx_queues,
			RTE_PMD_TAP_MAX_QUEUES);
		return -1;
	}
	if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
		RTE_LOG(ERR, PMD,
			"%s: number of tx queues %d exceeds max num of queues %d\n",
			dev->device->name,
			dev->data->nb_tx_queues,
			RTE_PMD_TAP_MAX_QUEUES);
		return -1;
	}

	RTE_LOG(INFO, PMD, "%s: %p: TX configured queues number: %u\n",
		dev->device->name, (void *)dev, dev->data->nb_tx_queues);

	RTE_LOG(INFO, PMD, "%s: %p: RX configured queues number: %u\n",
		dev->device->name, (void *)dev, dev->data->nb_rx_queues);

	return 0;
}
static uint32_t
tap_dev_speed_capa(void)
{
	uint32_t speed = pmd_link.link_speed;
	uint32_t capa = 0;

	if (speed >= ETH_SPEED_NUM_10M)
		capa |= ETH_LINK_SPEED_10M;
	if (speed >= ETH_SPEED_NUM_100M)
		capa |= ETH_LINK_SPEED_100M;
	if (speed >= ETH_SPEED_NUM_1G)
		capa |= ETH_LINK_SPEED_1G;
	if (speed >= ETH_SPEED_NUM_2_5G)
		capa |= ETH_LINK_SPEED_2_5G;
	if (speed >= ETH_SPEED_NUM_5G)
		capa |= ETH_LINK_SPEED_5G;
	if (speed >= ETH_SPEED_NUM_10G)
		capa |= ETH_LINK_SPEED_10G;
	if (speed >= ETH_SPEED_NUM_20G)
		capa |= ETH_LINK_SPEED_20G;
	if (speed >= ETH_SPEED_NUM_25G)
		capa |= ETH_LINK_SPEED_25G;
	if (speed >= ETH_SPEED_NUM_40G)
		capa |= ETH_LINK_SPEED_40G;
	if (speed >= ETH_SPEED_NUM_50G)
		capa |= ETH_LINK_SPEED_50G;
	if (speed >= ETH_SPEED_NUM_56G)
		capa |= ETH_LINK_SPEED_56G;
	if (speed >= ETH_SPEED_NUM_100G)
		capa |= ETH_LINK_SPEED_100G;

	return capa;
}
static void
tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
	dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
	dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
	dev_info->speed_capa = tap_dev_speed_capa();
	dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
	dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
	dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
				    dev_info->tx_queue_offload_capa;
}
static int
tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
{
	unsigned int i, imax;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
	unsigned long rx_nombuf = 0, ierrors = 0;
	const struct pmd_internals *pmd = dev->data->dev_private;

	/* rx queue statistics */
	imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
		dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
	for (i = 0; i < imax; i++) {
		tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
		tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
		rx_total += tap_stats->q_ipackets[i];
		rx_bytes_total += tap_stats->q_ibytes[i];
		rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
		ierrors += pmd->rxq[i].stats.ierrors;
	}

	/* tx queue statistics */
	imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
		dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;

	for (i = 0; i < imax; i++) {
		tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
		tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
		tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
		tx_total += tap_stats->q_opackets[i];
		tx_err_total += tap_stats->q_errors[i];
		tx_bytes_total += tap_stats->q_obytes[i];
	}

	tap_stats->ipackets = rx_total;
	tap_stats->ibytes = rx_bytes_total;
	tap_stats->ierrors = ierrors;
	tap_stats->rx_nombuf = rx_nombuf;
	tap_stats->opackets = tx_total;
	tap_stats->oerrors = tx_err_total;
	tap_stats->obytes = tx_bytes_total;
	return 0;
}
static void
tap_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	struct pmd_internals *pmd = dev->data->dev_private;

	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		pmd->rxq[i].stats.ipackets = 0;
		pmd->rxq[i].stats.ibytes = 0;
		pmd->rxq[i].stats.ierrors = 0;
		pmd->rxq[i].stats.rx_nombuf = 0;

		pmd->txq[i].stats.opackets = 0;
		pmd->txq[i].stats.errs = 0;
		pmd->txq[i].stats.obytes = 0;
	}
}
static void
tap_dev_close(struct rte_eth_dev *dev)
{
	int i;
	struct pmd_internals *internals = dev->data->dev_private;

	tap_link_set_down(dev);
	tap_flow_flush(dev, NULL);
	tap_flow_implicit_flush(internals, NULL);

	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		if (internals->rxq[i].fd != -1) {
			close(internals->rxq[i].fd);
			internals->rxq[i].fd = -1;
		}
		if (internals->txq[i].fd != -1) {
			close(internals->txq[i].fd);
			internals->txq[i].fd = -1;
		}
	}

	if (internals->remote_if_index) {
		/* Restore initial remote state */
		ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
		      &internals->remote_initial_flags);
	}
}
static void
tap_rx_queue_release(void *queue)
{
	struct rx_queue *rxq = queue;

	if (rxq && (rxq->fd > 0)) {
		close(rxq->fd);
		rxq->fd = -1;
		rte_pktmbuf_free(rxq->pool);
		rte_free(rxq->iovecs);
		rxq->pool = NULL;
		rxq->iovecs = NULL;
	}
}

static void
tap_tx_queue_release(void *queue)
{
	struct tx_queue *txq = queue;

	if (txq && (txq->fd > 0)) {
		close(txq->fd);
		txq->fd = -1;
	}
}
static int
tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = 0 };

	if (pmd->remote_if_index) {
		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
		if (!(ifr.ifr_flags & IFF_UP) ||
		    !(ifr.ifr_flags & IFF_RUNNING)) {
			dev_link->link_status = ETH_LINK_DOWN;
			return 0;
		}
	}
	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
	dev_link->link_status =
		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
		 ETH_LINK_UP :
		 ETH_LINK_DOWN);
	return 0;
}
static void
tap_promisc_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

	dev->data->promiscuous = 1;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_promisc_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

	dev->data->promiscuous = 0;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_allmulti_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

	dev->data->all_multicast = 1;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
}

static void
tap_allmulti_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

	dev->data->all_multicast = 0;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
}
static void
tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	enum ioctl_mode mode = LOCAL_ONLY;
	struct ifreq ifr;

	if (is_zero_ether_addr(mac_addr)) {
		RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
			dev->device->name);
		return;
	}
	/* Check the actual current MAC address on the tap netdevice */
	if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
		return;
	if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
			       mac_addr))
		return;
	/* Check the current MAC address on the remote */
	if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0)
		return;
	if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
				mac_addr))
		mode = LOCAL_AND_REMOTE;
	ifr.ifr_hwaddr.sa_family = AF_LOCAL;
	rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
	if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode) < 0)
		return;
	rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
	if (pmd->remote_if_index && !pmd->flow_isolate) {
		/* Replace MAC redirection rule after a MAC change */
		if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) {
			RTE_LOG(ERR, PMD,
				"%s: Couldn't delete MAC redirection rule\n",
				dev->device->name);
			return;
		}
		if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
			RTE_LOG(ERR, PMD,
				"%s: Couldn't add MAC redirection rule\n",
				dev->device->name);
	}
}
static int
tap_setup_queue(struct rte_eth_dev *dev,
		struct pmd_internals *internals,
		uint16_t qid,
		int is_rx)
{
	int *fd;
	int *other_fd;
	const char *dir;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct rx_queue *rx = &internals->rxq[qid];
	struct tx_queue *tx = &internals->txq[qid];

	if (is_rx) {
		fd = &rx->fd;
		other_fd = &tx->fd;
		dir = "rx";
	} else {
		fd = &tx->fd;
		other_fd = &rx->fd;
		dir = "tx";
	}
	if (*fd != -1) {
		/* fd for this queue already exists */
		RTE_LOG(DEBUG, PMD, "%s: fd %d for %s queue qid %d exists\n",
			pmd->name, *fd, dir, qid);
	} else if (*other_fd != -1) {
		/* Only other_fd exists. dup it */
		*fd = dup(*other_fd);
		if (*fd < 0) {
			*fd = -1;
			RTE_LOG(ERR, PMD, "%s: dup() failed.\n",
				pmd->name);
			return -1;
		}
		RTE_LOG(DEBUG, PMD, "%s: dup fd %d for %s queue qid %d (%d)\n",
			pmd->name, *other_fd, dir, qid, *fd);
	} else {
		/* Both RX and TX fds do not exist (equal -1). Create fd */
		*fd = tun_alloc(pmd);
		if (*fd < 0) {
			*fd = -1; /* restore original value */
			RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n",
				pmd->name);
			return -1;
		}
		RTE_LOG(DEBUG, PMD, "%s: add %s queue for qid %d fd %d\n",
			pmd->name, dir, qid, *fd);
	}

	tx->mtu = &dev->data->mtu;
	rx->rxmode = &dev->data->dev_conf.rxmode;

	return *fd;
}
static int
tap_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mp)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct rx_queue *rxq = &internals->rxq[rx_queue_id];
	struct rte_mbuf **tmp = &rxq->pool;
	long iov_max = sysconf(_SC_IOV_MAX);
	uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
	struct iovec (*iovecs)[nb_desc + 1];
	int data_off = RTE_PKTMBUF_HEADROOM;
	int ret = 0;
	int fd;
	int i;

	if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
		RTE_LOG(WARNING, PMD,
			"nb_rx_queues %d too small or mempool NULL\n",
			dev->data->nb_rx_queues);
		return -1;
	}

	/* Verify application offloads are valid for our port and queue. */
	if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
		rte_errno = ENOTSUP;
		RTE_LOG(ERR, PMD,
			"%p: Rx queue offloads 0x%" PRIx64
			" don't match port offloads 0x%" PRIx64
			" or supported offloads 0x%" PRIx64 "\n",
			(void *)dev, rx_conf->offloads,
			dev->data->dev_conf.rxmode.offloads,
			(tap_rx_offload_get_port_capa() |
			 tap_rx_offload_get_queue_capa()));
		return -rte_errno;
	}
	rxq->mp = mp;
	rxq->trigger_seen = 1; /* force initial burst */
	rxq->in_port = dev->data->port_id;
	rxq->nb_rx_desc = nb_desc;
	iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
				    socket_id);
	if (!iovecs) {
		RTE_LOG(WARNING, PMD,
			"%s: Couldn't allocate %d RX descriptors\n",
			dev->device->name, nb_desc);
		return -ENOMEM;
	}
	rxq->iovecs = iovecs;

	dev->data->rx_queues[rx_queue_id] = rxq;
	fd = tap_setup_queue(dev, internals, rx_queue_id, 1);
	if (fd == -1) {
		ret = fd;
		goto error;
	}

	(*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
	(*rxq->iovecs)[0].iov_base = &rxq->pi;
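
	/*
	 * Slot 0 of the iovec array always points at the tun_pi header;
	 * packet data starts at slot 1, matching the readv() layout used
	 * in pmd_rx_burst().
	 */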
	for (i = 1; i <= nb_desc; i++) {
		*tmp = rte_pktmbuf_alloc(rxq->mp);
		if (!*tmp) {
			RTE_LOG(WARNING, PMD,
				"%s: couldn't allocate memory for queue %d\n",
				dev->device->name, rx_queue_id);
			ret = -ENOMEM;
			goto error;
		}
		(*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
		(*rxq->iovecs)[i].iov_base =
			(char *)(*tmp)->buf_addr + data_off;
		/* Only the first segment keeps headroom */
		data_off = 0;
		tmp = &(*tmp)->next;
	}

	RTE_LOG(DEBUG, PMD, "  RX TAP device name %s, qid %d on fd %d\n",
		internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);

	return 0;

error:
	rte_pktmbuf_free(rxq->pool);
	rxq->pool = NULL;
	rte_free(rxq->iovecs);
	rxq->iovecs = NULL;
	return ret;
}
static int
tap_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct tx_queue *txq;
	int ret;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -1;
	dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
	txq = dev->data->tx_queues[tx_queue_id];
	/*
	 * Don't verify port offloads for applications which
	 * use the old API.
	 */
	if (tx_conf != NULL &&
	    !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
		if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
			txq->csum = !!(tx_conf->offloads &
				       (DEV_TX_OFFLOAD_IPV4_CKSUM |
					DEV_TX_OFFLOAD_UDP_CKSUM |
					DEV_TX_OFFLOAD_TCP_CKSUM));
		} else {
			rte_errno = ENOTSUP;
			RTE_LOG(ERR, PMD,
				"%p: Tx queue offloads 0x%" PRIx64
				" don't match port offloads 0x%" PRIx64
				" or supported offloads 0x%" PRIx64 "\n",
				(void *)dev, tx_conf->offloads,
				dev->data->dev_conf.txmode.offloads,
				tap_tx_offload_get_port_capa());
			return -rte_errno;
		}
	}
	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
	if (ret == -1)
		return -1;
	RTE_LOG(DEBUG, PMD,
		"  TX TAP device name %s, qid %d on fd %d csum %s\n",
		internals->name, tx_queue_id, internals->txq[tx_queue_id].fd,
		txq->csum ? "on" : "off");

	return 0;
}
static int
tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int err = 0;

	err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
	if (!err)
		dev->data->mtu = mtu;

	return err;
}

static int
tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
		     struct ether_addr *mc_addr_set __rte_unused,
		     uint32_t nb_mc_addr __rte_unused)
{
	/*
	 * Nothing to do actually: the tap has no filtering whatsoever, every
	 * packet is received.
	 */
	return 0;
}
static int
tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifinfomsg *info = NLMSG_DATA(nh);

	if (nh->nlmsg_type != RTM_NEWLINK ||
	    (info->ifi_index != pmd->if_index &&
	     info->ifi_index != pmd->remote_if_index))
		return 0;
	return tap_link_update(dev, 0);
}

static void
tap_dev_intr_handler(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	struct pmd_internals *pmd = dev->data->dev_private;

	tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
}
static int
tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	/* In any case, disable interrupt if the conf is no longer there. */
	if (!dev->data->dev_conf.intr_conf.lsc) {
		if (pmd->intr_handle.fd != -1) {
			tap_nl_final(pmd->intr_handle.fd);
			rte_intr_callback_unregister(&pmd->intr_handle,
				tap_dev_intr_handler, dev);
		}
		return 0;
	}
	if (set) {
		pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK);
		if (unlikely(pmd->intr_handle.fd == -1))
			return -EBADF;
		return rte_intr_callback_register(
			&pmd->intr_handle, tap_dev_intr_handler, dev);
	}
	tap_nl_final(pmd->intr_handle.fd);
	return rte_intr_callback_unregister(&pmd->intr_handle,
					    tap_dev_intr_handler, dev);
}
static int
tap_intr_handle_set(struct rte_eth_dev *dev, int set)
{
	int err;

	err = tap_lsc_intr_handle_set(dev, set);
	if (err)
		return err;
	err = tap_rx_intr_vec_set(dev, set);
	if (err && set)
		tap_lsc_intr_handle_set(dev, 0);
	return err;
}
static const uint32_t *
tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L2_ETHER_QINQ,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV4_EXT,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_SCTP,
	};

	return ptypes;
}
static int
tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
		  struct rte_eth_fc_conf *fc_conf)
{
	fc_conf->mode = RTE_FC_NONE;
	return 0;
}

static int
tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
		  struct rte_eth_fc_conf *fc_conf)
{
	if (fc_conf->mode != RTE_FC_NONE)
		return -ENOTSUP;
	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = tap_dev_start,
	.dev_stop = tap_dev_stop,
	.dev_close = tap_dev_close,
	.dev_configure = tap_dev_configure,
	.dev_infos_get = tap_dev_info,
	.rx_queue_setup = tap_rx_queue_setup,
	.tx_queue_setup = tap_tx_queue_setup,
	.rx_queue_release = tap_rx_queue_release,
	.tx_queue_release = tap_tx_queue_release,
	.flow_ctrl_get = tap_flow_ctrl_get,
	.flow_ctrl_set = tap_flow_ctrl_set,
	.link_update = tap_link_update,
	.dev_set_link_up = tap_link_set_up,
	.dev_set_link_down = tap_link_set_down,
	.promiscuous_enable = tap_promisc_enable,
	.promiscuous_disable = tap_promisc_disable,
	.allmulticast_enable = tap_allmulti_enable,
	.allmulticast_disable = tap_allmulti_disable,
	.mac_addr_set = tap_mac_set,
	.mtu_set = tap_mtu_set,
	.set_mc_addr_list = tap_set_mc_addr_list,
	.stats_get = tap_stats_get,
	.stats_reset = tap_stats_reset,
	.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
	.filter_ctrl = tap_dev_filter_ctrl,
};
static int
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
		   char *remote_iface, struct ether_addr *mac_addr)
{
	int numa_node = rte_socket_id();
	struct rte_eth_dev *dev;
	struct pmd_internals *pmd;
	struct rte_eth_dev_data *data;
	struct ifreq ifr;
	int i;

	RTE_LOG(DEBUG, PMD, "  TAP device on numa %u\n", rte_socket_id());

	data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node);
	if (!data) {
		RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n");
		goto error_exit_nodev;
	}

	dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
	if (!dev) {
		RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n");
		goto error_exit_nodev;
	}

	pmd = dev->data->dev_private;
	snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);

	pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (pmd->ioctl_sock == -1) {
		RTE_LOG(ERR, PMD,
			"TAP Unable to get a socket for management: %s\n",
			strerror(errno));
		goto error_exit;
	}

	/* Setup some default values */
	rte_memcpy(data, dev->data, sizeof(*data));
	data->dev_private = pmd;
	data->dev_flags = RTE_ETH_DEV_INTR_LSC;
	data->numa_node = numa_node;

	data->dev_link = pmd_link;
	data->mac_addrs = &pmd->eth_addr;
	/* Set the number of RX and TX queues */
	data->nb_rx_queues = 0;
	data->nb_tx_queues = 0;

	dev->data = data;
	dev->dev_ops = &ops;
	dev->rx_pkt_burst = pmd_rx_burst;
	dev->tx_pkt_burst = pmd_tx_burst;

	pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
	pmd->intr_handle.fd = -1;
	dev->intr_handle = &pmd->intr_handle;

	/* Presetup the fds to -1 as being not valid */
	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		pmd->rxq[i].fd = -1;
		pmd->txq[i].fd = -1;
	}
	if (is_zero_ether_addr(mac_addr))
		eth_random_addr((uint8_t *)&pmd->eth_addr);
	else
		/* sizeof(*mac_addr), not sizeof(mac_addr): the latter is the
		 * size of a pointer and would over-read the 6-byte address.
		 */
		rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr));
	/* Immediately create the netdevice (this will create the 1st queue). */
	/* rx queue */
	if (tap_setup_queue(dev, pmd, 0, 1) == -1)
		goto error_exit;
	/* tx queue */
	if (tap_setup_queue(dev, pmd, 0, 0) == -1)
		goto error_exit;

	ifr.ifr_mtu = dev->data->mtu;
	if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
		goto error_exit;

	memset(&ifr, 0, sizeof(struct ifreq));
	ifr.ifr_hwaddr.sa_family = AF_LOCAL;
	rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, ETHER_ADDR_LEN);
	if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
		goto error_exit;
	/*
	 * Set up everything related to rte_flow:
	 * - netlink socket
	 * - tap / remote if_index
	 * - mandatory QDISCs
	 * - rte_flow actual/implicit lists
	 */
	pmd->nlsk_fd = tap_nl_init(0);
	if (pmd->nlsk_fd == -1) {
		RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n",
			pmd->name);
		goto disable_rte_flow;
	}
	pmd->if_index = if_nametoindex(pmd->name);
	if (!pmd->if_index) {
		RTE_LOG(ERR, PMD, "%s: failed to get if_index.\n", pmd->name);
		goto disable_rte_flow;
	}
	if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
		RTE_LOG(ERR, PMD, "%s: failed to create multiq qdisc.\n",
			pmd->name);
		goto disable_rte_flow;
	}
	if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
		RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
			pmd->name);
		goto disable_rte_flow;
	}
	LIST_INIT(&pmd->flows);
	if (strlen(remote_iface)) {
		pmd->remote_if_index = if_nametoindex(remote_iface);
		if (!pmd->remote_if_index) {
			RTE_LOG(ERR, PMD, "%s: failed to get %s if_index.\n",
				pmd->name, remote_iface);
			goto error_remote;
		}
		snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
			 "%s", remote_iface);

		/* Save state of remote device */
		tap_ioctl(pmd, SIOCGIFFLAGS, &pmd->remote_initial_flags,
			  0, REMOTE_ONLY);

		/* Replicate remote MAC address */
		if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
			RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n",
				pmd->name, pmd->remote_iface);
			goto error_remote;
		}
		rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
			   ETHER_ADDR_LEN);
		/* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
		if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
			RTE_LOG(ERR, PMD, "%s: failed to set MAC address.\n",
				pmd->name);
			goto error_remote;
		}

		/*
		 * Flush usually returns negative value because it tries to
		 * delete every QDISC (and on a running device, one QDISC at
		 * least is needed). Ignore negative return value.
		 */
		qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
		if (qdisc_create_ingress(pmd->nlsk_fd,
					 pmd->remote_if_index) < 0) {
			RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
				pmd->remote_iface);
			goto error_remote;
		}
		LIST_INIT(&pmd->implicit_flows);
		if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
			RTE_LOG(ERR, PMD,
				"%s: failed to create implicit rules.\n",
				pmd->name);
			goto error_remote;
		}
	}

	return 0;
disable_rte_flow:
	RTE_LOG(ERR, PMD, " Disabling rte flow support: %s(%d)\n",
		strerror(errno), errno);
	if (strlen(remote_iface)) {
		RTE_LOG(ERR, PMD, "Remote feature requires flow support.\n");
		goto error_exit;
	}
	return 0;

error_remote:
	RTE_LOG(ERR, PMD, " Can't set up remote feature: %s(%d)\n",
		strerror(errno), errno);
	tap_flow_implicit_flush(pmd, NULL);

error_exit:
	if (pmd->ioctl_sock > 0)
		close(pmd->ioctl_sock);
	rte_eth_dev_release_port(dev);

error_exit_nodev:
	RTE_LOG(ERR, PMD, "TAP Unable to initialize %s\n",
		rte_vdev_device_name(vdev));

	rte_free(data);
	return -EINVAL;
}
static int
set_interface_name(const char *key __rte_unused,
		   const char *value,
		   void *extra_args)
{
	char *name = (char *)extra_args;

	if (value)
		snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s", value);
	else
		snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
			 DEFAULT_TAP_NAME, (tap_unit - 1));

	return 0;
}
static int
set_remote_iface(const char *key __rte_unused,
		 const char *value,
		 void *extra_args)
{
	char *name = (char *)extra_args;

	if (value)
		snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s", value);

	return 0;
}
static int parse_user_mac(struct ether_addr *user_mac,
			  const char *value)
{
	unsigned int index = 0;
	char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL;

	if (user_mac == NULL || value == NULL)
		return 0;

	snprintf(mac_temp, sizeof(mac_temp), "%s", value);
	mac_byte = strtok(mac_temp, ":");

	/* Bound index to ETHER_ADDR_LEN to avoid overflowing addr_bytes */
	while ((mac_byte != NULL) &&
	       (index < ETHER_ADDR_LEN) &&
	       (strlen(mac_byte) <= 2) &&
	       (strlen(mac_byte) == strspn(mac_byte,
					   ETH_TAP_CMP_MAC_FMT))) {
		user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16);
		mac_byte = strtok(NULL, ":");
	}

	return index;
}
static int
set_mac_type(const char *key __rte_unused,
	     const char *value,
	     void *extra_args)
{
	struct ether_addr *user_mac = extra_args;

	if (!value)
		return -1;

	if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
		static int iface_idx;

		/* fixed mac = 00:64:74:61:70:<iface_idx> */
		memcpy((char *)user_mac->addr_bytes, "\0dtap", ETHER_ADDR_LEN);
		user_mac->addr_bytes[ETHER_ADDR_LEN - 1] = iface_idx++ + '0';
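		/* e.g. the first "mac=fixed" port gets 00:64:74:61:70:30 */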
		goto success;
	}

	if (parse_user_mac(user_mac, value) != 6)
		goto error;
success:
	RTE_LOG(DEBUG, PMD, "TAP user MAC param (%s)\n", value);
	return 0;

error:
	RTE_LOG(ERR, PMD, "TAP user MAC (%s) is not in format (%s|%s)\n",
		value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT);
	return -1;
}
/* Open a TAP interface device.
 */
static int
rte_pmd_tap_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	int ret;
	struct rte_kvargs *kvlist = NULL;
	int speed;
	char tap_name[RTE_ETH_NAME_MAX_LEN];
	char remote_iface[RTE_ETH_NAME_MAX_LEN];
	struct ether_addr user_mac = { .addr_bytes = {0} };

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	speed = ETH_SPEED_NUM_10G;
	snprintf(tap_name, sizeof(tap_name), "%s%d",
		 DEFAULT_TAP_NAME, tap_unit++);
	memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
	if (params && (params[0] != '\0')) {
		RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);

		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist) {
			if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
					ETH_TAP_IFACE_ARG,
					&set_interface_name,
					tap_name);
				if (ret == -1)
					goto leave;
			}

			if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
					ETH_TAP_REMOTE_ARG,
					&set_remote_iface,
					remote_iface);
				if (ret == -1)
					goto leave;
			}

			if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
					ETH_TAP_MAC_ARG,
					&set_mac_type,
					&user_mac);
				if (ret == -1)
					goto leave;
			}
		}
	}
	pmd_link.link_speed = speed;

	RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
		name, tap_name);

	ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac);

leave:
	if (ret == -1) {
		RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n",
			name, tap_name);
		tap_unit--;	/* Restore the unit number */
	}
	rte_kvargs_free(kvlist);

	return ret;
}
/* Detach a TAP device.
 */
static int
rte_pmd_tap_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals;
	int i;

	RTE_LOG(DEBUG, PMD, "Closing TUN/TAP Ethernet device on numa %u\n",
		rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (!eth_dev)
		return 0;

	internals = eth_dev->data->dev_private;
	if (internals->nlsk_fd) {
		tap_flow_flush(eth_dev, NULL);
		tap_flow_implicit_flush(internals, NULL);
		tap_nl_final(internals->nlsk_fd);
	}
	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		if (internals->rxq[i].fd != -1) {
			close(internals->rxq[i].fd);
			internals->rxq[i].fd = -1;
		}
		if (internals->txq[i].fd != -1) {
			close(internals->txq[i].fd);
			internals->txq[i].fd = -1;
		}
	}

	close(internals->ioctl_sock);
	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_tap_drv = {
	.probe = rte_pmd_tap_probe,
	.remove = rte_pmd_tap_remove,
};

RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
			      ETH_TAP_IFACE_ARG "=<string> "
			      ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
			      ETH_TAP_REMOTE_ARG "=<string>");
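
/*
 * Illustrative usage (any DPDK application accepting EAL --vdev arguments
 * works the same way; testpmd is just one example):
 *   testpmd --vdev=net_tap0,iface=dtap0,mac=fixed -- -i
 * creates a kernel-visible "dtap0" netdevice driven by this PMD.
 */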