/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_net.h>
#include <rte_debug.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
#include <rte_gso.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <linux/if_ether.h>
#include <tap_rss.h>
#include <rte_eth_tap.h>
#include <tap_flow.h>
#include <tap_netlink.h>
#include <tap_tcmsgs.h>
/* Linux based path to the TUN device */
#define TUN_TAP_DEV_PATH	"/dev/net/tun"
#define DEFAULT_TAP_NAME	"dtap"
#define DEFAULT_TUN_NAME	"dtun"
#define ETH_TAP_IFACE_ARG	"iface"
#define ETH_TAP_REMOTE_ARG	"remote"
#define ETH_TAP_MAC_ARG		"mac"
#define ETH_TAP_MAC_FIXED	"fixed"

#define ETH_TAP_USR_MAC_FMT	"xx:xx:xx:xx:xx:xx"
#define ETH_TAP_CMP_MAC_FMT	"0123456789ABCDEFabcdef"
#define ETH_TAP_MAC_ARG_FMT	ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT
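
/*
 * Illustrative devargs usage for the options above (the exact EAL command
 * line depends on the application; this is only a sketch):
 *
 *   --vdev=net_tap0,iface=tap0,mac=fixed,remote=eth0
 *
 * "iface" names the tap netdevice to create, "remote" redirects traffic
 * from an existing netdevice, and "mac" is either the keyword "fixed" or
 * an address in xx:xx:xx:xx:xx:xx format.
 */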
#define TAP_GSO_MBUFS_PER_CORE	128
#define TAP_GSO_MBUF_SEG_SIZE	128
#define TAP_GSO_MBUF_CACHE_SIZE	4
#define TAP_GSO_MBUFS_NUM \
	(TAP_GSO_MBUFS_PER_CORE * TAP_GSO_MBUF_CACHE_SIZE)
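
/*
 * With the values above, the GSO mbuf pool created in tap_gso_ctx_setup()
 * holds TAP_GSO_MBUFS_PER_CORE * TAP_GSO_MBUF_CACHE_SIZE = 128 * 4 = 512
 * mbufs, each carrying a 128-byte data segment plus RTE_PKTMBUF_HEADROOM.
 */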
static struct rte_vdev_driver pmd_tap_drv;
static struct rte_vdev_driver pmd_tun_drv;
static const char *valid_arguments[] = {
	ETH_TAP_IFACE_ARG,
	ETH_TAP_REMOTE_ARG,
	ETH_TAP_MAC_ARG,
	NULL
};
static unsigned int tap_unit;
static unsigned int tun_unit;

static char tuntap_name[8];

static volatile uint32_t tap_trigger;	/* Rx trigger */
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};
static void
tap_trigger_cb(int sig __rte_unused)
{
	/* Valid trigger values are nonzero */
	tap_trigger = (tap_trigger + 1) | 0x80000000;
}
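
/*
 * ORing in 0x80000000 above keeps the trigger word nonzero even if the
 * counter wraps, so pmd_rx_burst() can treat zero as "no trigger seen".
 */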
/* Specifies on what netdevices the ioctl should be applied */
enum ioctl_mode {
	LOCAL_AND_REMOTE,
	LOCAL_ONLY,
	REMOTE_ONLY,
};

static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
/**
 * Tun/Tap allocation routine
 *
 * @param[in] pmd
 *   Pointer to private structure.
 *
 * @param[in] is_keepalive
 *   Keepalive flag
 *
 * @return
 *   -1 on failure, fd on success
 */
static int
tun_alloc(struct pmd_internals *pmd, int is_keepalive)
{
	struct ifreq ifr;
#ifdef IFF_MULTI_QUEUE
	unsigned int features;
#endif
	int fd;

	memset(&ifr, 0, sizeof(struct ifreq));
	/*
	 * Do not set IFF_NO_PI as packet information header will be needed
	 * to check if a received packet has been truncated.
	 */
	ifr.ifr_flags = (pmd->type == ETH_TUNTAP_TYPE_TAP) ?
		IFF_TAP : IFF_TUN | IFF_POINTOPOINT;
	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);

	TAP_LOG(DEBUG, "ifr_name '%s'", ifr.ifr_name);
	fd = open(TUN_TAP_DEV_PATH, O_RDWR);
	if (fd < 0) {
		TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
		goto error;
	}
#ifdef IFF_MULTI_QUEUE
	/* Grab the TUN features to verify we can work multi-queue */
	if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
		TAP_LOG(ERR, "%s unable to get TUN/TAP features",
			tuntap_name);
		goto error;
	}
	TAP_LOG(DEBUG, "%s Features %08x", tuntap_name, features);

	if (features & IFF_MULTI_QUEUE) {
		TAP_LOG(DEBUG, "  Multi-queue support for %d queues",
			RTE_PMD_TAP_MAX_QUEUES);
		ifr.ifr_flags |= IFF_MULTI_QUEUE;
	} else
#endif
	{
		ifr.ifr_flags |= IFF_ONE_QUEUE;
		TAP_LOG(DEBUG, "  Single-queue support only");
	}
	/* Set the TUN/TAP configuration and set the name if needed */
	if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
		TAP_LOG(WARNING, "Unable to set TUNSETIFF for %s: %s",
			ifr.ifr_name, strerror(errno));
		goto error;
	}
	if (is_keepalive) {
		/*
		 * Detach the TUN/TAP keep-alive queue
		 * to avoid traffic through it
		 */
		ifr.ifr_flags = IFF_DETACH_QUEUE;
		if (ioctl(fd, TUNSETQUEUE, (void *)&ifr) < 0) {
			TAP_LOG(WARNING,
				"Unable to detach keep-alive queue for %s: %s",
				ifr.ifr_name, strerror(errno));
			goto error;
		}
	}
	/* Always set the file descriptor to non-blocking */
	if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
		TAP_LOG(WARNING,
			"Unable to set %s to nonblocking: %s",
			ifr.ifr_name, strerror(errno));
		goto error;
	}
	/* Set up trigger to optimize empty Rx bursts */
	errno = 0;
	do {
		struct sigaction sa;
		int flags = fcntl(fd, F_GETFL);

		if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
			break;
		if (sa.sa_handler != tap_trigger_cb) {
			/*
			 * Make sure SIGIO is not already taken. This is done
			 * as late as possible to leave the application a
			 * chance to set up its own signal handler first.
			 */
			if (sa.sa_handler != SIG_IGN &&
			    sa.sa_handler != SIG_DFL) {
				errno = EBUSY;
				break;
			}
			sa = (struct sigaction){
				.sa_flags = SA_RESTART,
				.sa_handler = tap_trigger_cb,
			};
			if (sigaction(SIGIO, &sa, NULL) == -1)
				break;
		}
		/* Enable SIGIO on file descriptor */
		fcntl(fd, F_SETFL, flags | O_ASYNC);
		fcntl(fd, F_SETOWN, getpid());
	} while (0);
	if (errno) {
		/* Disable trigger globally in case of error */
		tap_trigger = 0;
		TAP_LOG(WARNING, "Rx trigger disabled: %s",
			strerror(errno));
	}

	return fd;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}
static void
tap_verify_csum(struct rte_mbuf *mbuf)
{
	uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
	uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
	uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
	unsigned int l2_len = sizeof(struct ether_hdr);
	unsigned int l3_len;
	uint16_t cksum = 0;
	void *l3_hdr;
	void *l4_hdr;
	if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
		l2_len += 4;
	else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
		l2_len += 8;
	/* Don't verify checksum for packets with discontinuous L2 header */
	if (unlikely(l2_len + sizeof(struct ipv4_hdr) >
		     rte_pktmbuf_data_len(mbuf)))
		return;
	l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
	if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
		struct ipv4_hdr *iph = l3_hdr;

		/* ihl contains the number of 4-byte words in the header */
		l3_len = 4 * (iph->version_ihl & 0xf);
		if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
			return;

		cksum = ~rte_raw_cksum(iph, l3_len);
		mbuf->ol_flags |= cksum ?
			PKT_RX_IP_CKSUM_BAD :
			PKT_RX_IP_CKSUM_GOOD;
	} else if (l3 == RTE_PTYPE_L3_IPV6) {
		l3_len = sizeof(struct ipv6_hdr);
	} else {
		/* IPv6 extensions are not supported */
		return;
	}
	if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
		l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *,
						 l2_len + l3_len);
		/* Don't verify checksum for multi-segment packets. */
		if (mbuf->nb_segs > 1)
			return;
		if (l3 == RTE_PTYPE_L3_IPV4)
			cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
		else if (l3 == RTE_PTYPE_L3_IPV6)
			cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
		mbuf->ol_flags |= cksum ?
			PKT_RX_L4_CKSUM_BAD :
			PKT_RX_L4_CKSUM_GOOD;
	}
}
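
/*
 * Note on the verification logic above: rte_raw_cksum() returns the
 * one's-complement sum, so for a valid header ~sum is 0. For example, a
 * correct IPv4 header sums to 0xffff, ~0xffff == 0, and a zero cksum
 * selects PKT_RX_IP_CKSUM_GOOD.
 */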
static uint64_t
tap_rx_offload_get_port_capa(void)
{
	/*
	 * No specific port Rx offload capabilities.
	 */
	return 0;
}

static uint64_t
tap_rx_offload_get_queue_capa(void)
{
	return DEV_RX_OFFLOAD_SCATTER |
	       DEV_RX_OFFLOAD_IPV4_CKSUM |
	       DEV_RX_OFFLOAD_UDP_CKSUM |
	       DEV_RX_OFFLOAD_TCP_CKSUM;
}
/* Callback to handle the rx burst of packets to the correct interface and
 * file descriptor(s) in a multi-queue setup.
 */
static uint16_t
pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct rx_queue *rxq = queue;
	struct pmd_process_private *process_private;
	uint16_t num_rx;
	unsigned long num_rx_bytes = 0;
	uint32_t trigger = tap_trigger;

	if (trigger == rxq->trigger_seen)
		return 0;
	if (trigger)
		rxq->trigger_seen = trigger;
	process_private = rte_eth_devices[rxq->in_port].process_private;
	rte_compiler_barrier();
	for (num_rx = 0; num_rx < nb_pkts; ) {
		struct rte_mbuf *mbuf = rxq->pool;
		struct rte_mbuf *seg = NULL;
		struct rte_mbuf *new_tail = NULL;
		uint16_t data_off = rte_pktmbuf_headroom(mbuf);
		int len;

		len = readv(process_private->rxq_fds[rxq->queue_id],
			    *rxq->iovecs,
			    1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
				 rxq->nb_rx_desc : 1));
		if (len < (int)sizeof(struct tun_pi))
			break;

		/* Packet couldn't fit in the provided mbuf */
		if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
			rxq->stats.ierrors++;
			continue;
		}

		len -= sizeof(struct tun_pi);
		mbuf->pkt_len = len;
		mbuf->port = rxq->in_port;
		while (1) {
			struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(!buf)) {
				rxq->stats.rx_nombuf++;
				/* No new buf has been allocated: do nothing */
				if (!new_tail || !seg)
					goto end;

				seg->next = NULL;
				rte_pktmbuf_free(mbuf);

				goto end;
			}
			seg = seg ? seg->next : mbuf;
			if (rxq->pool == mbuf)
				rxq->pool = buf;
			if (new_tail)
				new_tail->next = buf;
			new_tail = buf;
			new_tail->next = seg->next;
			/* iovecs[0] is reserved for packet info (pi) */
			(*rxq->iovecs)[mbuf->nb_segs].iov_len =
				buf->buf_len - data_off;
			(*rxq->iovecs)[mbuf->nb_segs].iov_base =
				(char *)buf->buf_addr + data_off;

			seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
			seg->data_off = data_off;

			len -= seg->data_len;
			if (len <= 0)
				break;
			mbuf->nb_segs++;
			/* First segment has headroom, not the others */
			data_off = 0;
		}
		seg->next = NULL;
		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
						      RTE_PTYPE_ALL_MASK);
		if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
			tap_verify_csum(mbuf);
		/* account for the receive frame */
		bufs[num_rx++] = mbuf;
		num_rx_bytes += mbuf->pkt_len;
	}
end:
	rxq->stats.ipackets += num_rx;
	rxq->stats.ibytes += num_rx_bytes;

	return num_rx;
}
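
/*
 * Rough shape of the Rx path above: iovec slot 0 always points at
 * rxq->pi (the tun_pi header) and slots 1..nb_rx_desc point at mbuf data
 * areas, so a single readv() scatters one packet across the pre-allocated
 * mbuf chain without an extra copy.
 */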
static uint64_t
tap_tx_offload_get_port_capa(void)
{
	/*
	 * No specific port Tx offload capabilities.
	 */
	return 0;
}

static uint64_t
tap_tx_offload_get_queue_capa(void)
{
	return DEV_TX_OFFLOAD_MULTI_SEGS |
	       DEV_TX_OFFLOAD_IPV4_CKSUM |
	       DEV_TX_OFFLOAD_UDP_CKSUM |
	       DEV_TX_OFFLOAD_TCP_CKSUM |
	       DEV_TX_OFFLOAD_TCP_TSO;
}
/* Finalize l4 checksum calculation */
static void
tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
		uint32_t l4_raw_cksum)
{
	if (l4_cksum) {
		uint32_t cksum;

		cksum = __rte_raw_cksum_reduce(l4_raw_cksum);
		cksum += l4_phdr_cksum;

		cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
		cksum = (~cksum) & 0xffff;
		if (cksum == 0)
			cksum = 0xffff;
		*l4_cksum = cksum;
	}
}
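
/*
 * Worked example of the fold above, assuming a raw sum of 0x1fffe:
 *   ((0x1fffe & 0xffff0000) >> 16) + (0x1fffe & 0xffff)
 *     = 0x1 + 0xfffe = 0xffff
 *   (~0xffff) & 0xffff = 0
 */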
/* Accumulate L4 raw checksums */
static void
tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
		     uint32_t *l4_raw_cksum)
{
	if (l4_cksum == NULL)
		return;

	*l4_raw_cksum = __rte_raw_cksum(l4_data, l4_len, *l4_raw_cksum);
}
/* L3 and L4 pseudo headers checksum offloads */
static void
tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
		unsigned int l3_len, unsigned int l4_len, uint16_t **l4_cksum,
		uint16_t *l4_phdr_cksum, uint32_t *l4_raw_cksum)
{
	void *l3_hdr = packet + l2_len;

	if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
		struct ipv4_hdr *iph = l3_hdr;
		uint16_t cksum;

		iph->hdr_checksum = 0;
		cksum = rte_raw_cksum(iph, l3_len);
		iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
	}
	if (ol_flags & PKT_TX_L4_MASK) {
		void *l4_hdr;

		l4_hdr = packet + l2_len + l3_len;
		if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
			*l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
		else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
			*l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
		else
			return;
		**l4_cksum = 0;
		if (ol_flags & PKT_TX_IPV4)
			*l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
		else
			*l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
		*l4_raw_cksum = __rte_raw_cksum(l4_hdr, l4_len, 0);
	}
}
static void
tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
		struct rte_mbuf **pmbufs,
		uint16_t *num_packets, unsigned long *num_tx_bytes)
{
	int i;
	uint16_t l234_hlen;
	struct pmd_process_private *process_private;

	process_private = rte_eth_devices[txq->out_port].process_private;
	for (i = 0; i < num_mbufs; i++) {
		struct rte_mbuf *mbuf = pmbufs[i];
		struct iovec iovecs[mbuf->nb_segs + 2];
		struct tun_pi pi = { .flags = 0, .proto = 0x00 };
		struct rte_mbuf *seg = mbuf;
		char m_copy[mbuf->data_len];
		int proto;
		int n;
		int j;
		int k; /* current index in iovecs for copying segments */
		uint16_t seg_len; /* length of first segment */
		uint16_t nb_segs;
		uint16_t *l4_cksum; /* l4 checksum (pseudo header + payload) */
		uint32_t l4_raw_cksum = 0; /* TCP/UDP payload raw checksum */
		uint16_t l4_phdr_cksum = 0; /* TCP/UDP pseudo header checksum */
		uint16_t is_cksum = 0; /* in case cksum should be offloaded */
		l4_cksum = NULL;
		if (txq->type == ETH_TUNTAP_TYPE_TUN) {
			/*
			 * TUN and TAP are created with IFF_NO_PI disabled.
			 * For the TUN PMD this is mandatory, as those fields
			 * are used by the kernel's tun.c to determine whether
			 * a packet is IP or non-IP.
			 *
			 * The logic fetches the first byte of data from the
			 * mbuf, then checks whether it is v4 or v6. If the
			 * first nibble is 4 or 6, the protocol field is set
			 * accordingly.
			 */
			char *buff_data = rte_pktmbuf_mtod(seg, void *);
			proto = (*buff_data & 0xf0);
			pi.proto = (proto == 0x40) ?
				rte_cpu_to_be_16(ETHER_TYPE_IPv4) :
				((proto == 0x60) ?
				rte_cpu_to_be_16(ETHER_TYPE_IPv6) :
				0x00);
		}

		k = 0;
		iovecs[k].iov_base = &pi;
		iovecs[k].iov_len = sizeof(pi);
		k++;
		nb_segs = mbuf->nb_segs;
		if (txq->csum &&
		    ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
		     (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
		     (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
			is_cksum = 1;
			/* Support only packets with at least layer 4
			 * header included in the first segment
			 */
			seg_len = rte_pktmbuf_data_len(mbuf);
			l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
			if (seg_len < l234_hlen)
				break;

			/* To change checksums, work on a copy of the l2, l3
			 * headers + l4 pseudo header
			 */
			rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
					l234_hlen);
			tap_tx_l3_cksum(m_copy, mbuf->ol_flags,
					mbuf->l2_len, mbuf->l3_len, mbuf->l4_len,
					&l4_cksum, &l4_phdr_cksum,
					&l4_raw_cksum);
			iovecs[k].iov_base = m_copy;
			iovecs[k].iov_len = l234_hlen;
			k++;
			/* Update next iovecs[] beyond l2, l3, l4 headers */
			if (seg_len > l234_hlen) {
				iovecs[k].iov_len = seg_len - l234_hlen;
				iovecs[k].iov_base =
					rte_pktmbuf_mtod(seg, char *) +
					l234_hlen;
				tap_tx_l4_add_rcksum(iovecs[k].iov_base,
					iovecs[k].iov_len, l4_cksum,
					&l4_raw_cksum);
				k++;
				nb_segs++;
			}
			seg = seg->next;
		}
		for (j = k; j <= nb_segs; j++) {
			iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
			iovecs[j].iov_base = rte_pktmbuf_mtod(seg, void *);
			if (is_cksum)
				tap_tx_l4_add_rcksum(iovecs[j].iov_base,
					iovecs[j].iov_len, l4_cksum,
					&l4_raw_cksum);
			seg = seg->next;
		}

		if (is_cksum)
			tap_tx_l4_cksum(l4_cksum, l4_phdr_cksum, l4_raw_cksum);

		/* copy the tx frame data */
		n = writev(process_private->txq_fds[txq->queue_id], iovecs, j);
		if (n <= 0)
			break;
		(*num_packets)++;
		(*num_tx_bytes) += rte_pktmbuf_pkt_len(mbuf);
	}
}
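
/*
 * The gather list built above is, schematically:
 *   iovecs[0]  -> struct tun_pi
 *   iovecs[1]  -> m_copy (l2/l3/l4 headers with fixed-up checksums,
 *                 only when checksum offload is requested)
 *   iovecs[..] -> the remaining mbuf segments
 * so one writev() emits the whole frame without modifying the mbufs.
 */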
/* Callback to handle sending packets from the tap interface
 */
static uint16_t
pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct tx_queue *txq = queue;
	uint16_t num_tx = 0;
	uint16_t num_packets = 0;
	unsigned long num_tx_bytes = 0;
	uint32_t max_size;
	int i;

	if (unlikely(nb_pkts == 0))
		return 0;
	struct rte_mbuf *gso_mbufs[MAX_GSO_MBUFS];
	max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf_in = bufs[num_tx];
		struct rte_mbuf **mbuf;
		uint16_t num_mbufs = 0;
		uint16_t tso_segsz = 0;
		int ret;
		uint16_t hdrs_len;
		int j;
		uint64_t tso;
		tso = mbuf_in->ol_flags & PKT_TX_TCP_SEG;
		if (tso) {
			struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;

			assert(gso_ctx != NULL);

			/* TCP segmentation implies TCP checksum offload */
			mbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;

			/* gso size is calculated without ETHER_CRC_LEN */
			hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
					mbuf_in->l4_len;
			tso_segsz = mbuf_in->tso_segsz + hdrs_len;
			if (unlikely(tso_segsz == hdrs_len) ||
				tso_segsz > *txq->mtu) {
				txq->stats.errs++;
				break;
			}
			gso_ctx->gso_size = tso_segsz;
			ret = rte_gso_segment(mbuf_in, /* packet to segment */
				gso_ctx, /* gso control block */
				(struct rte_mbuf **)&gso_mbufs, /* out mbufs */
				RTE_DIM(gso_mbufs)); /* max tso mbufs */
			/* ret contains the number of new created mbufs */
			if (ret < 0)
				break;

			mbuf = gso_mbufs;
			num_mbufs = ret;
		} else {
			/* stats.errs will be incremented */
			if (rte_pktmbuf_pkt_len(mbuf_in) > max_size)
				break;

			/* ret 0 indicates no new mbufs were created */
			ret = 0;
			mbuf = &mbuf_in;
			num_mbufs = 1;
		}

		tap_write_mbufs(txq, num_mbufs, mbuf,
				&num_packets, &num_tx_bytes);
		num_tx++;
		/* free original mbuf */
		rte_pktmbuf_free(mbuf_in);
		/* free tso mbufs */
		for (j = 0; j < ret; j++)
			rte_pktmbuf_free(mbuf[j]);
	}
	txq->stats.opackets += num_packets;
	txq->stats.errs += nb_pkts - num_tx;
	txq->stats.obytes += num_tx_bytes;

	return num_tx;
}
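
/*
 * Note on the TSO arithmetic above: mbuf->tso_segsz is the payload-only
 * segment size, while the gso_size handed to rte_gso_segment() must also
 * cover the l2+l3+l4 headers; hence tso_segsz = mbuf_in->tso_segsz +
 * hdrs_len.
 */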
static const char *
tap_ioctl_req2str(unsigned long request)
{
	switch (request) {
	case SIOCSIFFLAGS:
		return "SIOCSIFFLAGS";
	case SIOCGIFFLAGS:
		return "SIOCGIFFLAGS";
	case SIOCGIFHWADDR:
		return "SIOCGIFHWADDR";
	case SIOCSIFHWADDR:
		return "SIOCSIFHWADDR";
	case SIOCSIFMTU:
		return "SIOCSIFMTU";
	}
	return "UNKNOWN";
}
static int
tap_ioctl(struct pmd_internals *pmd, unsigned long request,
	  struct ifreq *ifr, int set, enum ioctl_mode mode)
{
	short req_flags = ifr->ifr_flags;
	int remote = pmd->remote_if_index &&
		(mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);

	if (!pmd->remote_if_index && mode == REMOTE_ONLY)
		return 0;
	/*
	 * If there is a remote netdevice, apply ioctl on it, then apply it on
	 * the tap netdevice.
	 */
apply:
	if (remote)
		snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->remote_iface);
	else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
		snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->name);
	switch (request) {
	case SIOCSIFFLAGS:
		/* fetch current flags to leave other flags untouched */
		if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
			goto error;
		if (set)
			ifr->ifr_flags |= req_flags;
		else
			ifr->ifr_flags &= ~req_flags;
		break;
	case SIOCGIFFLAGS:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
	case SIOCSIFMTU:
		break;
	default:
		RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
			pmd->name);
		return -EINVAL;
	}
	if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
		goto error;
	if (remote-- && mode == LOCAL_AND_REMOTE)
		goto apply;
	return 0;

error:
	TAP_LOG(DEBUG, "%s(%s) failed: %s(%d)", ifr->ifr_name,
		tap_ioctl_req2str(request), strerror(errno), errno);
	return -errno;
}
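
/*
 * Example of the flow above for tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1,
 * LOCAL_AND_REMOTE) with a remote configured: the first pass applies the
 * request to pmd->remote_iface ("remote" is nonzero), then "remote--"
 * sends a second pass through "apply" against the local tap netdevice.
 */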
static int
tap_link_set_down(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_UP };

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
}

static int
tap_link_set_up(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_UP };

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
}
static int
tap_dev_start(struct rte_eth_dev *dev)
{
	int err, i;

	err = tap_intr_handle_set(dev, 1);
	if (err)
		return err;

	err = tap_link_set_up(dev);
	if (err)
		return err;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return err;
}
/* This function gets called when the current port gets stopped.
 */
static void
tap_dev_stop(struct rte_eth_dev *dev)
{
	int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	tap_intr_handle_set(dev, 0);
	tap_link_set_down(dev);
}
static int
tap_dev_configure(struct rte_eth_dev *dev)
{
	if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
		TAP_LOG(ERR,
			"%s: number of rx queues %d exceeds max num of queues %d",
			dev->device->name,
			dev->data->nb_rx_queues,
			RTE_PMD_TAP_MAX_QUEUES);
		return -1;
	}
	if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
		TAP_LOG(ERR,
			"%s: number of tx queues %d exceeds max num of queues %d",
			dev->device->name,
			dev->data->nb_tx_queues,
			RTE_PMD_TAP_MAX_QUEUES);
		return -1;
	}

	TAP_LOG(INFO, "%s: %p: TX configured queues number: %u",
		dev->device->name, (void *)dev, dev->data->nb_tx_queues);

	TAP_LOG(INFO, "%s: %p: RX configured queues number: %u",
		dev->device->name, (void *)dev, dev->data->nb_rx_queues);

	return 0;
}
static uint32_t
tap_dev_speed_capa(void)
{
	uint32_t speed = pmd_link.link_speed;
	uint32_t capa = 0;

	if (speed >= ETH_SPEED_NUM_10M)
		capa |= ETH_LINK_SPEED_10M;
	if (speed >= ETH_SPEED_NUM_100M)
		capa |= ETH_LINK_SPEED_100M;
	if (speed >= ETH_SPEED_NUM_1G)
		capa |= ETH_LINK_SPEED_1G;
	if (speed >= ETH_SPEED_NUM_2_5G)
		capa |= ETH_LINK_SPEED_2_5G;
	if (speed >= ETH_SPEED_NUM_5G)
		capa |= ETH_LINK_SPEED_5G;
	if (speed >= ETH_SPEED_NUM_10G)
		capa |= ETH_LINK_SPEED_10G;
	if (speed >= ETH_SPEED_NUM_20G)
		capa |= ETH_LINK_SPEED_20G;
	if (speed >= ETH_SPEED_NUM_25G)
		capa |= ETH_LINK_SPEED_25G;
	if (speed >= ETH_SPEED_NUM_40G)
		capa |= ETH_LINK_SPEED_40G;
	if (speed >= ETH_SPEED_NUM_50G)
		capa |= ETH_LINK_SPEED_50G;
	if (speed >= ETH_SPEED_NUM_56G)
		capa |= ETH_LINK_SPEED_56G;
	if (speed >= ETH_SPEED_NUM_100G)
		capa |= ETH_LINK_SPEED_100G;

	return capa;
}
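
/*
 * Example: with the default pmd_link.link_speed of ETH_SPEED_NUM_10G, the
 * returned mask covers every flag from ETH_LINK_SPEED_10M up to
 * ETH_LINK_SPEED_10G, and none of the faster rates.
 */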
static void
tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
	dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
	dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
	dev_info->min_rx_bufsize = 0;
	dev_info->speed_capa = tap_dev_speed_capa();
	dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
	dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
	dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
				    dev_info->tx_queue_offload_capa;
	dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
	/*
	 * limitation: TAP supports all of IP, UDP and TCP hash
	 * functions together and not in partial combinations
	 */
	dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
}
static int
tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
{
	unsigned int i, imax;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
	unsigned long rx_nombuf = 0, ierrors = 0;
	const struct pmd_internals *pmd = dev->data->dev_private;

	/* rx queue statistics */
	imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
		dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
	for (i = 0; i < imax; i++) {
		tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
		tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
		rx_total += tap_stats->q_ipackets[i];
		rx_bytes_total += tap_stats->q_ibytes[i];
		rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
		ierrors += pmd->rxq[i].stats.ierrors;
	}

	/* tx queue statistics */
	imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
		dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;

	for (i = 0; i < imax; i++) {
		tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
		tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
		tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
		tx_total += tap_stats->q_opackets[i];
		tx_err_total += tap_stats->q_errors[i];
		tx_bytes_total += tap_stats->q_obytes[i];
	}

	tap_stats->ipackets = rx_total;
	tap_stats->ibytes = rx_bytes_total;
	tap_stats->ierrors = ierrors;
	tap_stats->rx_nombuf = rx_nombuf;
	tap_stats->opackets = tx_total;
	tap_stats->oerrors = tx_err_total;
	tap_stats->obytes = tx_bytes_total;
	return 0;
}
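
/*
 * The loops above are clamped to RTE_ETHDEV_QUEUE_STAT_CNTRS, so with
 * more queues configured than that, the extra queues' counters are not
 * reported, either per queue or in the aggregated totals.
 */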
static void
tap_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	struct pmd_internals *pmd = dev->data->dev_private;

	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		pmd->rxq[i].stats.ipackets = 0;
		pmd->rxq[i].stats.ibytes = 0;
		pmd->rxq[i].stats.ierrors = 0;
		pmd->rxq[i].stats.rx_nombuf = 0;

		pmd->txq[i].stats.opackets = 0;
		pmd->txq[i].stats.errs = 0;
		pmd->txq[i].stats.obytes = 0;
	}
}
static void
tap_dev_close(struct rte_eth_dev *dev)
{
	int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *process_private = dev->process_private;

	tap_link_set_down(dev);
	tap_flow_flush(dev, NULL);
	tap_flow_implicit_flush(internals, NULL);

	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		if (process_private->rxq_fds[i] != -1) {
			close(process_private->rxq_fds[i]);
			process_private->rxq_fds[i] = -1;
		}
		if (process_private->txq_fds[i] != -1) {
			close(process_private->txq_fds[i]);
			process_private->txq_fds[i] = -1;
		}
	}

	if (internals->remote_if_index) {
		/* Restore initial remote state */
		ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
		      &internals->remote_initial_flags);
	}

	if (internals->ka_fd != -1) {
		close(internals->ka_fd);
		internals->ka_fd = -1;
	}
	/*
	 * Since the TUN device has no more open file descriptors,
	 * it will be removed from the kernel.
	 */
}
static void
tap_rx_queue_release(void *queue)
{
	struct rx_queue *rxq = queue;
	struct pmd_process_private *process_private;

	if (!rxq)
		return;
	process_private = rte_eth_devices[rxq->in_port].process_private;
	if (process_private->rxq_fds[rxq->queue_id] > 0) {
		close(process_private->rxq_fds[rxq->queue_id]);
		process_private->rxq_fds[rxq->queue_id] = -1;
		rte_pktmbuf_free(rxq->pool);
		rte_free(rxq->iovecs);
		rxq->pool = NULL;
		rxq->iovecs = NULL;
	}
}
static void
tap_tx_queue_release(void *queue)
{
	struct tx_queue *txq = queue;
	struct pmd_process_private *process_private;

	if (!txq)
		return;
	process_private = rte_eth_devices[txq->out_port].process_private;

	if (process_private->txq_fds[txq->queue_id] > 0) {
		close(process_private->txq_fds[txq->queue_id]);
		process_private->txq_fds[txq->queue_id] = -1;
	}
}
static int
tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = 0 };

	if (pmd->remote_if_index) {
		tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
		if (!(ifr.ifr_flags & IFF_UP) ||
		    !(ifr.ifr_flags & IFF_RUNNING)) {
			dev_link->link_status = ETH_LINK_DOWN;
			return 0;
		}
	}
	tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
	dev_link->link_status =
		((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
		 ETH_LINK_UP :
		 ETH_LINK_DOWN);
	return 0;
}
static void
tap_promisc_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

	dev->data->promiscuous = 1;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_promisc_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_PROMISC };

	dev->data->promiscuous = 0;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
}

static void
tap_allmulti_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

	dev->data->all_multicast = 1;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
}

static void
tap_allmulti_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };

	dev->data->all_multicast = 0;
	tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
	if (pmd->remote_if_index && !pmd->flow_isolate)
		tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
}
static int
tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	enum ioctl_mode mode = LOCAL_ONLY;
	struct ifreq ifr;
	int ret;

	if (pmd->type == ETH_TUNTAP_TYPE_TUN) {
		TAP_LOG(ERR, "%s: can't set MAC address for TUN",
			dev->device->name);
		return -ENOTSUP;
	}

	if (is_zero_ether_addr(mac_addr)) {
		TAP_LOG(ERR, "%s: can't set an empty MAC address",
			dev->device->name);
		return -EINVAL;
	}
	/* Check the actual current MAC address on the tap netdevice */
	ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
	if (ret < 0)
		return ret;
	if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
			       mac_addr))
		return 0;
	/* Check the current MAC address on the remote */
	ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
	if (ret < 0)
		return ret;
	if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
				mac_addr))
		mode = LOCAL_AND_REMOTE;
	ifr.ifr_hwaddr.sa_family = AF_LOCAL;
	rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
	ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
	if (ret < 0)
		return ret;
	rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
	if (pmd->remote_if_index && !pmd->flow_isolate) {
		/* Replace MAC redirection rule after a MAC change */
		ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
		if (ret < 0) {
			TAP_LOG(ERR,
				"%s: Couldn't delete MAC redirection rule",
				dev->device->name);
			return ret;
		}
		ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
		if (ret < 0) {
			TAP_LOG(ERR,
				"%s: Couldn't add MAC redirection rule",
				dev->device->name);
			return ret;
		}
	}

	return 0;
}
static int
tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
{
	uint32_t gso_types;
	char pool_name[64];

	/*
	 * Create a private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes of
	 * data per mbuf; use this pool for both direct and indirect mbufs.
	 */
	struct rte_mempool *mp;      /* Mempool for GSO packets */

	/* initialize GSO context */
	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
	snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
	mp = rte_mempool_lookup((const char *)pool_name);
	if (!mp) {
		mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
			TAP_GSO_MBUF_CACHE_SIZE, 0,
			RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
			SOCKET_ID_ANY);
		if (!mp) {
			struct pmd_internals *pmd = dev->data->dev_private;

			RTE_LOG(DEBUG, PMD, "%s: failed to create mbuf pool for device %s\n",
				pmd->name, dev->device->name);
			return -1;
		}
	}

	gso_ctx->direct_pool = mp;
	gso_ctx->indirect_pool = mp;
	gso_ctx->gso_types = gso_types;
	gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
	gso_ctx->flag = 0;

	return 0;
}
static int
tap_setup_queue(struct rte_eth_dev *dev,
		struct pmd_internals *internals,
		uint16_t qid,
		int is_rx)
{
	int ret;
	int *fd;
	int *other_fd;
	const char *dir;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct pmd_process_private *process_private = dev->process_private;
	struct rx_queue *rx = &internals->rxq[qid];
	struct tx_queue *tx = &internals->txq[qid];
	struct rte_gso_ctx *gso_ctx;

	if (is_rx) {
		fd = &process_private->rxq_fds[qid];
		other_fd = &process_private->txq_fds[qid];
		dir = "rx";
		gso_ctx = NULL;
	} else {
		fd = &process_private->txq_fds[qid];
		other_fd = &process_private->rxq_fds[qid];
		dir = "tx";
		gso_ctx = &tx->gso_ctx;
	}
	if (*fd != -1) {
		/* fd for this queue already exists */
		TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
			pmd->name, *fd, dir, qid);
		gso_ctx = NULL;
	} else if (*other_fd != -1) {
		/* Only other_fd exists. dup it */
		*fd = dup(*other_fd);
		if (*fd < 0) {
			*fd = -1;
			TAP_LOG(ERR, "%s: dup() failed.", pmd->name);
			return -1;
		}
		TAP_LOG(DEBUG, "%s: dup fd %d for %s queue qid %d (%d)",
			pmd->name, *other_fd, dir, qid, *fd);
	} else {
		/* Both RX and TX fds do not exist (equal -1). Create fd */
		*fd = tun_alloc(pmd, 0);
		if (*fd < 0) {
			*fd = -1; /* restore original value */
			TAP_LOG(ERR, "%s: tun_alloc() failed.", pmd->name);
			return -1;
		}
		TAP_LOG(DEBUG, "%s: add %s queue for qid %d fd %d",
			pmd->name, dir, qid, *fd);
	}

	tx->mtu = &dev->data->mtu;
	rx->rxmode = &dev->data->dev_conf.rxmode;
	if (gso_ctx) {
		ret = tap_gso_ctx_setup(gso_ctx, dev);
		if (ret)
			return -1;
	}

	tx->type = pmd->type;

	return *fd;
}
static int
tap_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc,
		   unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mp)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *process_private = dev->process_private;
	struct rx_queue *rxq = &internals->rxq[rx_queue_id];
	struct rte_mbuf **tmp = &rxq->pool;
	long iov_max = sysconf(_SC_IOV_MAX);
	uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
	struct iovec (*iovecs)[nb_desc + 1];
	int data_off = RTE_PKTMBUF_HEADROOM;
	int ret = 0;
	int fd;
	int i;

	if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
		TAP_LOG(WARNING,
			"nb_rx_queues %d too small or mempool NULL",
			dev->data->nb_rx_queues);
		return -1;
	}

	rxq->mp = mp;
	rxq->trigger_seen = 1; /* force initial burst */
	rxq->in_port = dev->data->port_id;
	rxq->queue_id = rx_queue_id;
	rxq->nb_rx_desc = nb_desc;
	iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
				    socket_id);
	if (!iovecs) {
		TAP_LOG(WARNING,
			"%s: Couldn't allocate %d RX descriptors",
			dev->device->name, nb_desc);
		return -ENOMEM;
	}
	rxq->iovecs = iovecs;

	dev->data->rx_queues[rx_queue_id] = rxq;
	fd = tap_setup_queue(dev, internals, rx_queue_id, 1);
	if (fd == -1) {
		ret = fd;
		goto error;
	}

	(*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
	(*rxq->iovecs)[0].iov_base = &rxq->pi;

	for (i = 1; i <= nb_desc; i++) {
		*tmp = rte_pktmbuf_alloc(rxq->mp);
		if (!*tmp) {
			TAP_LOG(WARNING,
				"%s: couldn't allocate memory for queue %d",
				dev->device->name, rx_queue_id);
			ret = -ENOMEM;
			goto error;
		}
		(*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
		(*rxq->iovecs)[i].iov_base =
			(char *)(*tmp)->buf_addr + data_off;
		data_off = 0;
		tmp = &(*tmp)->next;
	}

	TAP_LOG(DEBUG, "  RX TUNTAP device name %s, qid %d on fd %d",
		internals->name, rx_queue_id,
		process_private->rxq_fds[rx_queue_id]);

	return 0;

error:
	rte_pktmbuf_free(rxq->pool);
	rxq->pool = NULL;
	rte_free(rxq->iovecs);
	rxq->iovecs = NULL;
	return ret;
}
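
/*
 * nb_desc is clamped to sysconf(_SC_IOV_MAX) - 1 above because a single
 * readv() accepts at most IOV_MAX iovec entries and slot 0 of the array
 * is reserved for the tun_pi header.
 */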
static int
tap_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *process_private = dev->process_private;
	struct tx_queue *txq;
	int ret;
	uint64_t offloads;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -1;
	dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
	txq = dev->data->tx_queues[tx_queue_id];
	txq->out_port = dev->data->port_id;
	txq->queue_id = tx_queue_id;

	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
	txq->csum = !!(offloads &
			(DEV_TX_OFFLOAD_IPV4_CKSUM |
			 DEV_TX_OFFLOAD_UDP_CKSUM |
			 DEV_TX_OFFLOAD_TCP_CKSUM));

	ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
	if (ret == -1)
		return -1;
	TAP_LOG(DEBUG,
		"  TX TUNTAP device name %s, qid %d on fd %d csum %s",
		internals->name, tx_queue_id,
		process_private->txq_fds[tx_queue_id],
		txq->csum ? "on" : "off");

	return 0;
}
static int
tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int err;

	err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
	if (!err)
		dev->data->mtu = mtu;

	return err;
}

static int
tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
		     struct ether_addr *mc_addr_set __rte_unused,
		     uint32_t nb_mc_addr __rte_unused)
{
	/*
	 * Nothing to do actually: the tap has no filtering whatsoever, every
	 * packet is received.
	 */
	return 0;
}
static int
tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct pmd_internals *pmd = dev->data->dev_private;
	struct ifinfomsg *info = NLMSG_DATA(nh);

	if (nh->nlmsg_type != RTM_NEWLINK ||
	    (info->ifi_index != pmd->if_index &&
	     info->ifi_index != pmd->remote_if_index))
		return 0;
	return tap_link_update(dev, 0);
}

static void
tap_dev_intr_handler(void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	struct pmd_internals *pmd = dev->data->dev_private;

	tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
}
static int
tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	/* In any case, disable interrupt if the conf is no longer there. */
	if (!dev->data->dev_conf.intr_conf.lsc) {
		if (pmd->intr_handle.fd != -1) {
			tap_nl_final(pmd->intr_handle.fd);
			rte_intr_callback_unregister(&pmd->intr_handle,
				tap_dev_intr_handler, dev);
		}
		return 0;
	}
	if (set) {
		pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK);
		if (unlikely(pmd->intr_handle.fd == -1))
			return -EBADF;
		return rte_intr_callback_register(
			&pmd->intr_handle, tap_dev_intr_handler, dev);
	}
	tap_nl_final(pmd->intr_handle.fd);
	return rte_intr_callback_unregister(&pmd->intr_handle,
					    tap_dev_intr_handler, dev);
}
static int
tap_intr_handle_set(struct rte_eth_dev *dev, int set)
{
	int err;

	err = tap_lsc_intr_handle_set(dev, set);
	if (err)
		return err;
	err = tap_rx_intr_vec_set(dev, set);
	if (err && set)
		tap_lsc_intr_handle_set(dev, 0);
	return err;
}
static const uint32_t *
tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L2_ETHER_QINQ,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV4_EXT,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_SCTP,
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_SCTP,
	};

	return ptypes;
}
static int
tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
		  struct rte_eth_fc_conf *fc_conf)
{
	fc_conf->mode = RTE_FC_NONE;
	return 0;
}

static int
tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
		  struct rte_eth_fc_conf *fc_conf)
{
	if (fc_conf->mode != RTE_FC_NONE)
		return -ENOTSUP;
	return 0;
}
/**
 * DPDK callback to update the RSS hash configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] rss_conf
 *   RSS configuration data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
tap_rss_hash_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_conf *rss_conf)
{
	if (rss_conf->rss_hf & TAP_RSS_HF_MASK) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (rss_conf->rss_key && rss_conf->rss_key_len) {
		/*
		 * Currently TAP RSS key is hard coded
		 * and cannot be updated
		 */
		TAP_LOG(ERR,
			"port %u RSS key cannot be updated",
			dev->data->port_id);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}
static int
tap_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
tap_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
tap_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
tap_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = tap_dev_start,
	.dev_stop = tap_dev_stop,
	.dev_close = tap_dev_close,
	.dev_configure = tap_dev_configure,
	.dev_infos_get = tap_dev_info,
	.rx_queue_setup = tap_rx_queue_setup,
	.tx_queue_setup = tap_tx_queue_setup,
	.rx_queue_start = tap_rx_queue_start,
	.tx_queue_start = tap_tx_queue_start,
	.rx_queue_stop = tap_rx_queue_stop,
	.tx_queue_stop = tap_tx_queue_stop,
	.rx_queue_release = tap_rx_queue_release,
	.tx_queue_release = tap_tx_queue_release,
	.flow_ctrl_get = tap_flow_ctrl_get,
	.flow_ctrl_set = tap_flow_ctrl_set,
	.link_update = tap_link_update,
	.dev_set_link_up = tap_link_set_up,
	.dev_set_link_down = tap_link_set_down,
	.promiscuous_enable = tap_promisc_enable,
	.promiscuous_disable = tap_promisc_disable,
	.allmulticast_enable = tap_allmulti_enable,
	.allmulticast_disable = tap_allmulti_disable,
	.mac_addr_set = tap_mac_set,
	.mtu_set = tap_mtu_set,
	.set_mc_addr_list = tap_set_mc_addr_list,
	.stats_get = tap_stats_get,
	.stats_reset = tap_stats_reset,
	.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
	.rss_hash_update = tap_rss_hash_update,
	.filter_ctrl = tap_dev_filter_ctrl,
};
static int
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
		   char *remote_iface, struct ether_addr *mac_addr,
		   enum rte_tuntap_type type)
{
	int numa_node = rte_socket_id();
	struct rte_eth_dev *dev;
	struct pmd_internals *pmd;
	struct pmd_process_private *process_private;
	struct rte_eth_dev_data *data;
	struct ifreq ifr;
	int i;

	TAP_LOG(DEBUG, "%s device on numa %u",
		tuntap_name, rte_socket_id());

	dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
	if (!dev) {
		TAP_LOG(ERR, "%s Unable to allocate device struct",
			tuntap_name);
		goto error_exit_nodev;
	}
	process_private = (struct pmd_process_private *)
		rte_zmalloc_socket(tap_name, sizeof(struct pmd_process_private),
			RTE_CACHE_LINE_SIZE, dev->device->numa_node);

	if (process_private == NULL) {
		TAP_LOG(ERR, "Failed to alloc memory for process private");
		return -1;
	}
	pmd = dev->data->dev_private;
	dev->process_private = process_private;
	snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
	pmd->type = type;

	pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (pmd->ioctl_sock == -1) {
		TAP_LOG(ERR,
			"%s Unable to get a socket for management: %s",
			tuntap_name, strerror(errno));
		goto error_exit;
	}
	/* Setup some default values */
	data = dev->data;
	data->dev_private = pmd;
	data->dev_flags = RTE_ETH_DEV_INTR_LSC;
	data->numa_node = numa_node;

	data->dev_link = pmd_link;
	data->mac_addrs = &pmd->eth_addr;
	/* Set the number of RX and TX queues */
	data->nb_rx_queues = 0;
	data->nb_tx_queues = 0;

	dev->dev_ops = &ops;
	dev->rx_pkt_burst = pmd_rx_burst;
	dev->tx_pkt_burst = pmd_tx_burst;

	pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
	pmd->intr_handle.fd = -1;
	dev->intr_handle = &pmd->intr_handle;
	/* Presetup the fds to -1 as being not valid */
	pmd->ka_fd = -1;
	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		process_private->rxq_fds[i] = -1;
		process_private->txq_fds[i] = -1;
	}

	if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
		if (is_zero_ether_addr(mac_addr))
			eth_random_addr((uint8_t *)&pmd->eth_addr);
		else
			rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr));
	}
	/*
	 * Allocate a TUN device keep-alive file descriptor that will only be
	 * closed when the TUN device itself is closed or removed.
	 * This keep-alive file descriptor will guarantee that the TUN device
	 * exists even when all of its queues are closed
	 */
	pmd->ka_fd = tun_alloc(pmd, 1);
	if (pmd->ka_fd == -1) {
		TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
		goto error_exit;
	}

	ifr.ifr_mtu = dev->data->mtu;
	if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
		goto error_exit;

	if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
		memset(&ifr, 0, sizeof(struct ifreq));
		ifr.ifr_hwaddr.sa_family = AF_LOCAL;
		rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
			   ETHER_ADDR_LEN);
		if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
			goto error_exit;
	}
	/*
	 * Set up everything related to rte_flow:
	 * - netlink socket
	 * - tap / remote if_index
	 * - mandatory QDISCs
	 * - rte_flow actual/implicit lists
	 * - implicit rules
	 */
	pmd->nlsk_fd = tap_nl_init(0);
	if (pmd->nlsk_fd == -1) {
		TAP_LOG(WARNING, "%s: failed to create netlink socket.",
			pmd->name);
		goto disable_rte_flow;
	}
	pmd->if_index = if_nametoindex(pmd->name);
	if (!pmd->if_index) {
		TAP_LOG(ERR, "%s: failed to get if_index.", pmd->name);
		goto disable_rte_flow;
	}
	if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
		TAP_LOG(ERR, "%s: failed to create multiq qdisc.",
			pmd->name);
		goto disable_rte_flow;
	}
	if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
		TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
			pmd->name);
		goto disable_rte_flow;
	}
	LIST_INIT(&pmd->flows);
	if (strlen(remote_iface)) {
		pmd->remote_if_index = if_nametoindex(remote_iface);
		if (!pmd->remote_if_index) {
			TAP_LOG(ERR, "%s: failed to get %s if_index.",
				pmd->name, remote_iface);
			goto error_remote;
		}
		snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
			 "%s", remote_iface);

		/* Save state of remote device */
		tap_ioctl(pmd, SIOCGIFFLAGS,
			  &pmd->remote_initial_flags, 0, REMOTE_ONLY);

		/* Replicate remote MAC address */
		if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
			TAP_LOG(ERR, "%s: failed to get %s MAC address.",
				pmd->name, pmd->remote_iface);
			goto error_remote;
		}
		rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
			   ETHER_ADDR_LEN);
		/* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
		if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
			TAP_LOG(ERR, "%s: failed to set %s MAC address.",
				pmd->name, remote_iface);
			goto error_remote;
		}

		/*
		 * Flush usually returns negative value because it tries to
		 * delete every QDISC (and on a running device, one QDISC at
		 * least is needed). Ignore negative return value.
		 */
		qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
		if (qdisc_create_ingress(pmd->nlsk_fd,
					 pmd->remote_if_index) < 0) {
			TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
				pmd->remote_iface);
			goto error_remote;
		}
		LIST_INIT(&pmd->implicit_flows);
		if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
		    tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
			TAP_LOG(ERR,
				"%s: failed to create implicit rules.",
				pmd->name);
			goto error_remote;
		}
	}
	rte_eth_dev_probing_finish(dev);
	return 0;

disable_rte_flow:
	TAP_LOG(ERR, " Disabling rte flow support: %s(%d)",
		strerror(errno), errno);
	if (strlen(remote_iface)) {
		TAP_LOG(ERR, "Remote feature requires flow support.");
		goto error_exit;
	}
	rte_eth_dev_probing_finish(dev);
	return 0;

error_remote:
	TAP_LOG(ERR, " Can't set up remote feature: %s(%d)",
		strerror(errno), errno);
	tap_flow_implicit_flush(pmd, NULL);

error_exit:
	if (pmd->ioctl_sock > 0)
		close(pmd->ioctl_sock);
	rte_eth_dev_release_port(dev);

error_exit_nodev:
	TAP_LOG(ERR, "%s Unable to initialize %s",
		tuntap_name, rte_vdev_device_name(vdev));

	return -EINVAL;
}
static int
set_interface_name(const char *key __rte_unused,
		   const char *value,
		   void *extra_args)
{
	char *name = (char *)extra_args;

	if (value)
		strlcpy(name, value, RTE_ETH_NAME_MAX_LEN - 1);
	else
		snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
			 DEFAULT_TAP_NAME, (tap_unit - 1));

	return 0;
}
static int
set_remote_iface(const char *key __rte_unused,
		 const char *value,
		 void *extra_args)
{
	char *name = (char *)extra_args;

	if (value)
		strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);

	return 0;
}
static int parse_user_mac(struct ether_addr *user_mac,
		const char *value)
{
	unsigned int index = 0;
	char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL;

	if (user_mac == NULL || value == NULL)
		return 0;

	strlcpy(mac_temp, value, sizeof(mac_temp));
	mac_byte = strtok(mac_temp, ":");

	while ((mac_byte != NULL) &&
	       (strlen(mac_byte) <= 2) &&
	       (strlen(mac_byte) == strspn(mac_byte,
					   ETH_TAP_CMP_MAC_FMT))) {
		user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16);
		mac_byte = strtok(NULL, ":");
	}

	return index;
}
static int
set_mac_type(const char *key __rte_unused,
	     const char *value,
	     void *extra_args)
{
	struct ether_addr *user_mac = extra_args;

	if (!value)
		return 0;

	if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
		static int iface_idx;

		/* fixed mac = 00:64:74:61:70:<iface_idx> */
		memcpy((char *)user_mac->addr_bytes, "\0dtap", ETHER_ADDR_LEN);
		user_mac->addr_bytes[ETHER_ADDR_LEN - 1] = iface_idx++ + '0';
		goto success;
	}

	if (parse_user_mac(user_mac, value) != 6)
		goto error;
success:
	TAP_LOG(DEBUG, "TAP user MAC param (%s)", value);
	return 0;

error:
	TAP_LOG(ERR, "TAP user MAC (%s) is not in format (%s|%s)",
		value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT);
	return -1;
}
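
/*
 * Worked example for mac=fixed above: "\0dtap" yields the leading bytes
 * 00:64:74:61:70 ('d', 't', 'a', 'p' in ASCII) and the last byte is
 * '0' + iface_idx, so the first tap device gets 00:64:74:61:70:30.
 */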
/*
 * Open a TUN interface device. The TUN PMD
 * 1) sets tap_type as false,
 * 2) takes the iface devargs as its argument,
 * 3) sets the link speed to 10G, as the interface is virtual.
 */
static int
rte_pmd_tun_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	int ret;
	struct rte_kvargs *kvlist = NULL;
	char tun_name[RTE_ETH_NAME_MAX_LEN];
	char remote_iface[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_dev *eth_dev;

	strcpy(tuntap_name, "TUN");

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
	    strlen(params) == 0) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			TAP_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	snprintf(tun_name, sizeof(tun_name), "%s%u",
		 DEFAULT_TUN_NAME, tun_unit++);
	if (params && (params[0] != '\0')) {
		TAP_LOG(DEBUG, "parameters (%s)", params);

		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist) {
			if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
					ETH_TAP_IFACE_ARG,
					&set_interface_name,
					tun_name);

				if (ret == -1)
					goto leave;
			}
		}
	}
	pmd_link.link_speed = ETH_SPEED_NUM_10G;

	TAP_LOG(NOTICE, "Initializing pmd_tun for %s as %s",
		name, tun_name);

	ret = eth_dev_tap_create(dev, tun_name, remote_iface, 0,
		ETH_TUNTAP_TYPE_TUN);

leave:
	if (ret == -1) {
		TAP_LOG(ERR, "Failed to create pmd for %s as %s",
			name, tun_name);
		tun_unit--; /* Restore the unit number */
	}
	rte_kvargs_free(kvlist);

	return ret;
}
/* Open a TAP interface device.
 */
static int
rte_pmd_tap_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	int ret;
	struct rte_kvargs *kvlist = NULL;
	int speed;
	char tap_name[RTE_ETH_NAME_MAX_LEN];
	char remote_iface[RTE_ETH_NAME_MAX_LEN];
	struct ether_addr user_mac = { .addr_bytes = {0} };
	struct rte_eth_dev *eth_dev;

	strcpy(tuntap_name, "TAP");

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			TAP_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	speed = ETH_SPEED_NUM_10G;
	snprintf(tap_name, sizeof(tap_name), "%s%u",
		 DEFAULT_TAP_NAME, tap_unit++);
	memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
	if (params && (params[0] != '\0')) {
		TAP_LOG(DEBUG, "parameters (%s)", params);

		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist) {
			if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
					ETH_TAP_IFACE_ARG,
					&set_interface_name,
					tap_name);
				if (ret == -1)
					goto leave;
			}

			if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
					ETH_TAP_REMOTE_ARG,
					&set_remote_iface,
					remote_iface);
				if (ret == -1)
					goto leave;
			}

			if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
					ETH_TAP_MAC_ARG,
					&set_mac_type,
					&user_mac);
				if (ret == -1)
					goto leave;
			}
		}
	}
	pmd_link.link_speed = speed;

	TAP_LOG(NOTICE, "Initializing pmd_tap for %s as %s",
		name, tap_name);

	ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac,
		ETH_TUNTAP_TYPE_TAP);

leave:
	if (ret == -1) {
		TAP_LOG(ERR, "Failed to create pmd for %s as %s",
			name, tap_name);
		tap_unit--; /* Restore the unit number */
	}
	rte_kvargs_free(kvlist);

	return ret;
}
/* detach a TUNTAP device.
 */
static int
rte_pmd_tap_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals;
	struct pmd_process_private *process_private;
	int i;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (!eth_dev)
		return -ENODEV;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port_secondary(eth_dev);

	internals = eth_dev->data->dev_private;
	process_private = eth_dev->process_private;

	TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
		(internals->type == ETH_TUNTAP_TYPE_TAP) ? "TAP" : "TUN",
		rte_socket_id());
	if (internals->nlsk_fd) {
		tap_flow_flush(eth_dev, NULL);
		tap_flow_implicit_flush(internals, NULL);
		tap_nl_final(internals->nlsk_fd);
	}
	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		if (process_private->rxq_fds[i] != -1) {
			close(process_private->rxq_fds[i]);
			process_private->rxq_fds[i] = -1;
		}
		if (process_private->txq_fds[i] != -1) {
			close(process_private->txq_fds[i]);
			process_private->txq_fds[i] = -1;
		}
	}

	close(internals->ioctl_sock);
	/* Close the keep-alive fd before internals is freed below. */
	if (internals->ka_fd != -1) {
		close(internals->ka_fd);
		internals->ka_fd = -1;
	}
	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->process_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}
static struct rte_vdev_driver pmd_tun_drv = {
	.probe = rte_pmd_tun_probe,
	.remove = rte_pmd_tap_remove,
};

static struct rte_vdev_driver pmd_tap_drv = {
	.probe = rte_pmd_tap_probe,
	.remove = rte_pmd_tap_remove,
};

RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
RTE_PMD_REGISTER_VDEV(net_tun, pmd_tun_drv);
RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
RTE_PMD_REGISTER_PARAM_STRING(net_tun,
			      ETH_TAP_IFACE_ARG "=<string> ");
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
			      ETH_TAP_IFACE_ARG "=<string> "
			      ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
			      ETH_TAP_REMOTE_ARG "=<string>");
RTE_INIT(tap_init_log)
{
	tap_logtype = rte_log_register("pmd.net.tap");
	if (tap_logtype >= 0)
		rte_log_set_level(tap_logtype, RTE_LOG_NOTICE);
}