1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
5 #include <rte_atomic.h>
6 #include <rte_branch_prediction.h>
7 #include <rte_byteorder.h>
8 #include <rte_common.h>
10 #include <ethdev_driver.h>
11 #include <ethdev_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_bus_vdev.h>
14 #include <rte_kvargs.h>
16 #include <rte_debug.h>
18 #include <rte_string_fns.h>
19 #include <rte_ethdev.h>
20 #include <rte_errno.h>
21 #include <rte_cycles.h>
23 #include <sys/types.h>
25 #include <sys/socket.h>
26 #include <sys/ioctl.h>
27 #include <sys/utsname.h>
35 #include <arpa/inet.h>
37 #include <linux/if_tun.h>
38 #include <linux/if_ether.h>
43 #include <rte_eth_tap.h>
45 #include <tap_netlink.h>
46 #include <tap_tcmsgs.h>
48 /* Linux based path to the TUN device */
49 #define TUN_TAP_DEV_PATH "/dev/net/tun"
50 #define DEFAULT_TAP_NAME "dtap"
51 #define DEFAULT_TUN_NAME "dtun"
53 #define ETH_TAP_IFACE_ARG "iface"
54 #define ETH_TAP_REMOTE_ARG "remote"
55 #define ETH_TAP_MAC_ARG "mac"
56 #define ETH_TAP_MAC_FIXED "fixed"
58 #define ETH_TAP_USR_MAC_FMT "xx:xx:xx:xx:xx:xx"
59 #define ETH_TAP_CMP_MAC_FMT "0123456789ABCDEFabcdef"
60 #define ETH_TAP_MAC_ARG_FMT ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT
62 #define TAP_GSO_MBUFS_PER_CORE 128
63 #define TAP_GSO_MBUF_SEG_SIZE 128
64 #define TAP_GSO_MBUF_CACHE_SIZE 4
65 #define TAP_GSO_MBUFS_NUM \
66 (TAP_GSO_MBUFS_PER_CORE * TAP_GSO_MBUF_CACHE_SIZE)
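/* Worked example with the defaults above: 128 mbufs per core times a
 * cache size of 4 gives a pool of 512 mbufs, each holding a 128-byte
 * segment for GSO output.
 */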
68 /* IPC key for queue fds sync */
69 #define TAP_MP_KEY "tap_mp_sync_queues"
71 #define TAP_IOV_DEFAULT_MAX 1024
73 #define TAP_RX_OFFLOAD (RTE_ETH_RX_OFFLOAD_SCATTER | \
74 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
75 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
76 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
78 #define TAP_TX_OFFLOAD (RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
79 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
80 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
81 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
82 RTE_ETH_TX_OFFLOAD_TCP_TSO)
84 static int tap_devices_count;
86 static const char *tuntap_types[ETH_TUNTAP_TYPE_MAX] = {
87 "UNKNOWN", "TUN", "TAP"
90 static const char *valid_arguments[] = {
97 static volatile uint32_t tap_trigger; /* Rx trigger */
99 static struct rte_eth_link pmd_link = {
100 .link_speed = RTE_ETH_SPEED_NUM_10G,
101 .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
102 .link_status = RTE_ETH_LINK_DOWN,
103 .link_autoneg = RTE_ETH_LINK_FIXED,
107 tap_trigger_cb(int sig __rte_unused)
109 /* Valid trigger values are nonzero */
110 tap_trigger = (tap_trigger + 1) | 0x80000000;
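/* Illustrative property of the update above: even when the counter in
 * the low bits wraps, OR-ing in 0x80000000 keeps the trigger nonzero:
 *
 *   (0x7fffffff + 1) | 0x80000000 == 0x80000000
 *   (0xffffffff + 1) | 0x80000000 == 0x80000000
 *
 * so a reader can always distinguish "triggered" from the initial 0.
 */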
113 /* Specifies on which netdevices the ioctl should be applied */
120 /* Message header to synchronize queues via IPC */
122 char port_name[RTE_DEV_NAME_MAX_LEN];
126 * The file descriptors are in the dedicated part
127 * of the Unix message to be translated by the kernel.
131 static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
134 * Tun/Tap allocation routine
137 * Pointer to private structure.
139 * @param[in] is_keepalive
143 * -1 on failure, fd on success
146 tun_alloc(struct pmd_internals *pmd, int is_keepalive)
149 #ifdef IFF_MULTI_QUEUE
150 unsigned int features;
152 int fd, signo, flags;
154 memset(&ifr, 0, sizeof(struct ifreq));
157 * Do not set IFF_NO_PI as the packet information header will be needed
158 * to check if a received packet has been truncated.
160 ifr.ifr_flags = (pmd->type == ETH_TUNTAP_TYPE_TAP) ?
161 IFF_TAP : IFF_TUN | IFF_POINTOPOINT;
162 strlcpy(ifr.ifr_name, pmd->name, IFNAMSIZ);
164 fd = open(TUN_TAP_DEV_PATH, O_RDWR);
166 TAP_LOG(ERR, "Unable to open %s interface", TUN_TAP_DEV_PATH);
170 #ifdef IFF_MULTI_QUEUE
171 /* Grab the TUN features to verify we can work multi-queue */
172 if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
173 TAP_LOG(ERR, "unable to get TUN/TAP features");
176 TAP_LOG(DEBUG, "%s Features %08x", TUN_TAP_DEV_PATH, features);
178 if (features & IFF_MULTI_QUEUE) {
179 TAP_LOG(DEBUG, " Multi-queue support for %d queues",
180 RTE_PMD_TAP_MAX_QUEUES);
181 ifr.ifr_flags |= IFF_MULTI_QUEUE;
185 ifr.ifr_flags |= IFF_ONE_QUEUE;
186 TAP_LOG(DEBUG, " Only single queue supported");
189 /* Set the TUN/TAP configuration and set the name if needed */
190 if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
191 TAP_LOG(WARNING, "Unable to set TUNSETIFF for %s: %s",
192 ifr.ifr_name, strerror(errno));
197 * The name passed to the kernel might be a wildcard like dtun%d,
198 * and we need to find the resulting device name.
200 TAP_LOG(DEBUG, "Device name is '%s'", ifr.ifr_name);
201 strlcpy(pmd->name, ifr.ifr_name, RTE_ETH_NAME_MAX_LEN);
205 * Detach the TUN/TAP keep-alive queue
206 * to avoid traffic through it
208 ifr.ifr_flags = IFF_DETACH_QUEUE;
209 if (ioctl(fd, TUNSETQUEUE, (void *)&ifr) < 0) {
211 "Unable to detach keep-alive queue for %s: %s",
212 ifr.ifr_name, strerror(errno));
217 flags = fcntl(fd, F_GETFL);
220 "Unable to get %s current flags\n",
225 /* Always set the file descriptor to non-blocking */
227 if (fcntl(fd, F_SETFL, flags) < 0) {
229 "Unable to set %s to nonblocking: %s",
230 ifr.ifr_name, strerror(errno));
234 /* Find a free realtime signal */
235 for (signo = SIGRTMIN + 1; signo < SIGRTMAX; signo++) {
238 if (sigaction(signo, NULL, &sa) == -1) {
240 "Unable to get current rt-signal %d handler",
245 /* Already have the handler we want on this signal */
246 if (sa.sa_handler == tap_trigger_cb)
249 /* Is the handler in use by the application? */
250 if (sa.sa_handler != SIG_DFL) {
252 "Skipping used rt-signal %d", signo);
256 sa = (struct sigaction) {
257 .sa_flags = SA_RESTART,
258 .sa_handler = tap_trigger_cb,
261 if (sigaction(signo, &sa, NULL) == -1) {
263 "Unable to set rt-signal %d handler\n", signo);
267 /* Found a good signal to use */
269 "Using rt-signal %d", signo);
273 if (signo == SIGRTMAX) {
274 TAP_LOG(WARNING, "All rt-signals are in use");
276 /* Disable trigger globally in case of error */
278 TAP_LOG(NOTICE, "No Rx trigger signal available");
280 /* Enable signal on file descriptor */
281 if (fcntl(fd, F_SETSIG, signo) < 0) {
282 TAP_LOG(WARNING, "Unable to set signo %d for fd %d: %s",
283 signo, fd, strerror(errno));
286 if (fcntl(fd, F_SETFL, flags | O_ASYNC) < 0) {
287 TAP_LOG(WARNING, "Unable to set fcntl flags: %s",
292 if (fcntl(fd, F_SETOWN, getpid()) < 0) {
293 TAP_LOG(WARNING, "Unable to set fcntl owner: %s",
307 tap_verify_csum(struct rte_mbuf *mbuf)
309 uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
310 uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
311 uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
312 unsigned int l2_len = sizeof(struct rte_ether_hdr);
317 struct rte_udp_hdr *udp_hdr;
319 if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
321 else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
323 /* Don't verify checksum for packets with discontinuous L2 header */
324 if (unlikely(l2_len + sizeof(struct rte_ipv4_hdr) >
325 rte_pktmbuf_data_len(mbuf)))
327 l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
328 if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
329 struct rte_ipv4_hdr *iph = l3_hdr;
331 l3_len = rte_ipv4_hdr_len(iph);
332 if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
334 /* check that the total length reported by header is not
335 * greater than the total received size
337 if (l2_len + rte_be_to_cpu_16(iph->total_length) >
338 rte_pktmbuf_data_len(mbuf))
341 cksum = ~rte_raw_cksum(iph, l3_len);
342 mbuf->ol_flags |= cksum ?
343 RTE_MBUF_F_RX_IP_CKSUM_BAD :
344 RTE_MBUF_F_RX_IP_CKSUM_GOOD;
345 } else if (l3 == RTE_PTYPE_L3_IPV6) {
346 struct rte_ipv6_hdr *iph = l3_hdr;
348 l3_len = sizeof(struct rte_ipv6_hdr);
349 /* check that the total length reported by header is not
350 * greater than the total received size
352 if (l2_len + l3_len + rte_be_to_cpu_16(iph->payload_len) >
353 rte_pktmbuf_data_len(mbuf))
356 /* - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN cannot happen because
357 * mbuf->packet_type is filled by rte_net_get_ptype() which
358 * never returns this value.
359 * - IPv6 extensions are not supported.
363 if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
366 l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len);
367 /* Don't verify checksum for multi-segment packets. */
368 if (mbuf->nb_segs > 1)
370 if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
371 if (l4 == RTE_PTYPE_L4_UDP) {
372 udp_hdr = (struct rte_udp_hdr *)l4_hdr;
373 if (udp_hdr->dgram_cksum == 0) {
375 * For IPv4, a zero UDP checksum
376 * indicates that the sender did not
377 * generate one [RFC 768].
379 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
383 cksum_ok = !rte_ipv4_udptcp_cksum_verify(l3_hdr,
385 } else { /* l3 == RTE_PTYPE_L3_IPV6, checked above */
386 cksum_ok = !rte_ipv6_udptcp_cksum_verify(l3_hdr,
389 mbuf->ol_flags |= cksum_ok ?
390 RTE_MBUF_F_RX_L4_CKSUM_GOOD : RTE_MBUF_F_RX_L4_CKSUM_BAD;
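/* Illustrative (hypothetical application-side code, not part of the
 * driver): the flags set above are consumed after rte_eth_rx_burst(),
 * e.g.
 *
 *   if ((m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
 *       RTE_MBUF_F_RX_L4_CKSUM_BAD)
 *       rte_pktmbuf_free(m);
 */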
395 tap_rxq_pool_free(struct rte_mbuf *pool)
397 struct rte_mbuf *mbuf = pool;
398 uint16_t nb_segs = 1;
407 pool->nb_segs = nb_segs;
408 rte_pktmbuf_free(pool);
411 /* Callback to handle the Rx burst of packets from the correct interface and
412 * file descriptor(s) in a multi-queue setup.
415 pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
417 struct rx_queue *rxq = queue;
418 struct pmd_process_private *process_private;
420 unsigned long num_rx_bytes = 0;
421 uint32_t trigger = tap_trigger;
423 if (trigger == rxq->trigger_seen)
426 process_private = rte_eth_devices[rxq->in_port].process_private;
427 for (num_rx = 0; num_rx < nb_pkts; ) {
428 struct rte_mbuf *mbuf = rxq->pool;
429 struct rte_mbuf *seg = NULL;
430 struct rte_mbuf *new_tail = NULL;
431 uint16_t data_off = rte_pktmbuf_headroom(mbuf);
434 len = readv(process_private->rxq_fds[rxq->queue_id],
436 1 + (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ?
437 rxq->nb_rx_desc : 1));
438 if (len < (int)sizeof(struct tun_pi))
441 /* Packet couldn't fit in the provided mbuf */
442 if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
443 rxq->stats.ierrors++;
447 len -= sizeof(struct tun_pi);
450 mbuf->port = rxq->in_port;
452 struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);
454 if (unlikely(!buf)) {
455 rxq->stats.rx_nombuf++;
456 /* No new buf has been allocated: do nothing */
457 if (!new_tail || !seg)
461 tap_rxq_pool_free(mbuf);
465 seg = seg ? seg->next : mbuf;
466 if (rxq->pool == mbuf)
469 new_tail->next = buf;
471 new_tail->next = seg->next;
473 /* iovecs[0] is reserved for packet info (pi) */
474 (*rxq->iovecs)[mbuf->nb_segs].iov_len =
475 buf->buf_len - data_off;
476 (*rxq->iovecs)[mbuf->nb_segs].iov_base =
477 (char *)buf->buf_addr + data_off;
479 seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
480 seg->data_off = data_off;
482 len -= seg->data_len;
486 /* First segment has headroom, not the others */
490 mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
492 if (rxq->rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
493 tap_verify_csum(mbuf);
495 /* account for the received frame */
496 bufs[num_rx++] = mbuf;
497 num_rx_bytes += mbuf->pkt_len;
500 rxq->stats.ipackets += num_rx;
501 rxq->stats.ibytes += num_rx_bytes;
503 if (trigger && num_rx < nb_pkts)
504 rxq->trigger_seen = trigger;
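/* Illustrative layout of the scatter readv() used above, assuming two
 * RX descriptors: iovec 0 always maps the tun_pi header, the rest map
 * mbuf data areas:
 *
 *   iov[0] = { &rxq->pi,                  sizeof(struct tun_pi) }
 *   iov[1] = { buf0->buf_addr + headroom, buf0->buf_len - headroom }
 *   iov[2] = { buf1->buf_addr + headroom, buf1->buf_len - headroom }
 */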
509 /* Finalize l4 checksum calculation */
511 tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
512 uint32_t l4_raw_cksum)
517 cksum = __rte_raw_cksum_reduce(l4_raw_cksum);
518 cksum += l4_phdr_cksum;
520 cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
521 cksum = (~cksum) & 0xffff;
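/* Worked example with hypothetical values: a raw sum of 0x0002fffd
 * reduces to 0x0002 + 0xfffd = 0xffff; adding a pseudo-header sum of
 * 0x0001 yields 0x10000, which the fold above turns into 0x0001, and
 * the final one's complement is (~0x0001) & 0xffff = 0xfffe.
 */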
528 /* Accumulate L4 raw checksums */
530 tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
531 uint32_t *l4_raw_cksum)
533 if (l4_cksum == NULL)
536 *l4_raw_cksum = __rte_raw_cksum(l4_data, l4_len, *l4_raw_cksum);
539 /* L3 and L4 pseudo headers checksum offloads */
541 tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
542 unsigned int l3_len, unsigned int l4_len, uint16_t **l4_cksum,
543 uint16_t *l4_phdr_cksum, uint32_t *l4_raw_cksum)
545 void *l3_hdr = packet + l2_len;
547 if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
548 struct rte_ipv4_hdr *iph = l3_hdr;
551 iph->hdr_checksum = 0;
552 cksum = rte_raw_cksum(iph, l3_len);
553 iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
555 if (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
558 l4_hdr = packet + l2_len + l3_len;
559 if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
560 *l4_cksum = &((struct rte_udp_hdr *)l4_hdr)->dgram_cksum;
561 else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM)
562 *l4_cksum = &((struct rte_tcp_hdr *)l4_hdr)->cksum;
566 if (ol_flags & RTE_MBUF_F_TX_IPV4)
567 *l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
569 *l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
570 *l4_raw_cksum = __rte_raw_cksum(l4_hdr, l4_len, 0);
575 tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
576 struct rte_mbuf **pmbufs,
577 uint16_t *num_packets, unsigned long *num_tx_bytes)
581 struct pmd_process_private *process_private;
583 process_private = rte_eth_devices[txq->out_port].process_private;
585 for (i = 0; i < num_mbufs; i++) {
586 struct rte_mbuf *mbuf = pmbufs[i];
587 struct iovec iovecs[mbuf->nb_segs + 2];
588 struct tun_pi pi = { .flags = 0, .proto = 0x00 };
589 struct rte_mbuf *seg = mbuf;
590 char m_copy[mbuf->data_len];
594 int k; /* current index in iovecs for copying segments */
595 uint16_t seg_len; /* length of first segment */
597 uint16_t *l4_cksum; /* l4 checksum (pseudo header + payload) */
598 uint32_t l4_raw_cksum = 0; /* TCP/UDP payload raw checksum */
599 uint16_t l4_phdr_cksum = 0; /* TCP/UDP pseudo header checksum */
600 uint16_t is_cksum = 0; /* set when cksum should be offloaded */
603 if (txq->type == ETH_TUNTAP_TYPE_TUN) {
605 * TUN and TAP are created with IFF_NO_PI disabled.
606 * For the TUN PMD this is mandatory, as these fields are used by
607 * the kernel's tun.c to determine whether the packet is IP or non-IP.
610 * The logic fetches the first byte of data from the mbuf,
611 * then checks whether it is v4 or v6. If the first nibble
612 * is 4 or 6, the protocol field is updated accordingly.
614 char *buff_data = rte_pktmbuf_mtod(seg, void *);
615 proto = (*buff_data & 0xf0);
616 pi.proto = (proto == 0x40) ?
617 rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
619 rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) :
624 iovecs[k].iov_base = &pi;
625 iovecs[k].iov_len = sizeof(pi);
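/* Illustrative: for an IPv4 packet the first byte is typically 0x45
 * (version 4, IHL 5), so (0x45 & 0xf0) == 0x40 and pi.proto is set to
 * rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); a 0x6x first byte selects
 * IPv6.
 */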
628 nb_segs = mbuf->nb_segs;
630 ((mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4) ||
631 (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM ||
632 (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM))) {
635 /* Support only packets with at least layer 4
636 * header included in the first segment
638 seg_len = rte_pktmbuf_data_len(mbuf);
639 l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
640 if (seg_len < l234_hlen)
643 /* To change checksums, work on a copy of the l2, l3
644 * headers + l4 pseudo header
646 rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
648 tap_tx_l3_cksum(m_copy, mbuf->ol_flags,
649 mbuf->l2_len, mbuf->l3_len, mbuf->l4_len,
650 &l4_cksum, &l4_phdr_cksum,
652 iovecs[k].iov_base = m_copy;
653 iovecs[k].iov_len = l234_hlen;
656 /* Update next iovecs[] beyond l2, l3, l4 headers */
657 if (seg_len > l234_hlen) {
658 iovecs[k].iov_len = seg_len - l234_hlen;
660 rte_pktmbuf_mtod(seg, char *) +
662 tap_tx_l4_add_rcksum(iovecs[k].iov_base,
663 iovecs[k].iov_len, l4_cksum,
671 for (j = k; j <= nb_segs; j++) {
672 iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
673 iovecs[j].iov_base = rte_pktmbuf_mtod(seg, void *);
675 tap_tx_l4_add_rcksum(iovecs[j].iov_base,
676 iovecs[j].iov_len, l4_cksum,
682 tap_tx_l4_cksum(l4_cksum, l4_phdr_cksum, l4_raw_cksum);
684 /* write the tx frame data */
685 n = writev(process_private->txq_fds[txq->queue_id], iovecs, j);
690 (*num_tx_bytes) += rte_pktmbuf_pkt_len(mbuf);
695 /* Callback to handle sending packets out through the tap interface
698 pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
700 struct tx_queue *txq = queue;
702 uint16_t num_packets = 0;
703 unsigned long num_tx_bytes = 0;
707 if (unlikely(nb_pkts == 0))
710 struct rte_mbuf *gso_mbufs[MAX_GSO_MBUFS];
711 max_size = *txq->mtu + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4);
712 for (i = 0; i < nb_pkts; i++) {
713 struct rte_mbuf *mbuf_in = bufs[num_tx];
714 struct rte_mbuf **mbuf;
715 uint16_t num_mbufs = 0;
716 uint16_t tso_segsz = 0;
722 tso = mbuf_in->ol_flags & RTE_MBUF_F_TX_TCP_SEG;
724 struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;
726 /* TCP segmentation implies TCP checksum offload */
727 mbuf_in->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
729 /* gso size is calculated without RTE_ETHER_CRC_LEN */
730 hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
732 tso_segsz = mbuf_in->tso_segsz + hdrs_len;
733 if (unlikely(tso_segsz == hdrs_len) ||
734 tso_segsz > *txq->mtu) {
738 gso_ctx->gso_size = tso_segsz;
739 /* 'mbuf_in' packet to segment */
740 num_tso_mbufs = rte_gso_segment(mbuf_in,
741 gso_ctx, /* gso control block */
742 (struct rte_mbuf **)&gso_mbufs, /* out mbufs */
743 RTE_DIM(gso_mbufs)); /* max tso mbufs */
745 /* ret contains the number of newly created mbufs */
746 if (num_tso_mbufs < 0)
749 if (num_tso_mbufs >= 1) {
751 num_mbufs = num_tso_mbufs;
753 /* 0 means it can be transmitted directly
760 /* stats.errs will be incremented */
761 if (rte_pktmbuf_pkt_len(mbuf_in) > max_size)
764 /* ret 0 indicates no new mbufs were created */
770 ret = tap_write_mbufs(txq, num_mbufs, mbuf,
771 &num_packets, &num_tx_bytes);
775 if (num_tso_mbufs > 0)
776 rte_pktmbuf_free_bulk(mbuf, num_tso_mbufs);
780 /* free original mbuf */
781 rte_pktmbuf_free(mbuf_in);
783 if (num_tso_mbufs > 0)
784 rte_pktmbuf_free_bulk(mbuf, num_tso_mbufs);
787 txq->stats.opackets += num_packets;
788 txq->stats.errs += nb_pkts - num_tx;
789 txq->stats.obytes += num_tx_bytes;
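/* Illustrative usage (hypothetical application code): this callback is
 * reached through the generic ethdev API, e.g.
 *
 *   uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 *
 * and any packets not accepted (sent < n) remain owned by the caller.
 */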
795 tap_ioctl_req2str(unsigned long request)
799 return "SIOCSIFFLAGS";
801 return "SIOCGIFFLAGS";
803 return "SIOCGIFHWADDR";
805 return "SIOCSIFHWADDR";
813 tap_ioctl(struct pmd_internals *pmd, unsigned long request,
814 struct ifreq *ifr, int set, enum ioctl_mode mode)
816 short req_flags = ifr->ifr_flags;
817 int remote = pmd->remote_if_index &&
818 (mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);
820 if (!pmd->remote_if_index && mode == REMOTE_ONLY)
823 * If there is a remote netdevice, apply ioctl on it, then apply it on
828 strlcpy(ifr->ifr_name, pmd->remote_iface, IFNAMSIZ);
829 else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
830 strlcpy(ifr->ifr_name, pmd->name, IFNAMSIZ);
833 /* fetch current flags to leave other flags untouched */
834 if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
837 ifr->ifr_flags |= req_flags;
839 ifr->ifr_flags &= ~req_flags;
847 TAP_LOG(WARNING, "%s: ioctl() called with wrong arg",
851 if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
853 if (remote-- && mode == LOCAL_AND_REMOTE)
858 TAP_LOG(DEBUG, "%s(%s) failed: %s(%d)", ifr->ifr_name,
859 tap_ioctl_req2str(request), strerror(errno), errno);
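/* Illustrative: with LOCAL_AND_REMOTE the ioctl is applied to the
 * remote netdevice first and then, via the remote-- retry logic above,
 * re-applied to the local one, e.g.
 *
 *   tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
 *
 * sets IFF_UP on both interfaces (cf. tap_link_set_up()).
 */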
864 tap_link_set_down(struct rte_eth_dev *dev)
866 struct pmd_internals *pmd = dev->data->dev_private;
867 struct ifreq ifr = { .ifr_flags = IFF_UP };
869 dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
870 return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
874 tap_link_set_up(struct rte_eth_dev *dev)
876 struct pmd_internals *pmd = dev->data->dev_private;
877 struct ifreq ifr = { .ifr_flags = IFF_UP };
879 dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
880 return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
884 tap_dev_start(struct rte_eth_dev *dev)
888 err = tap_intr_handle_set(dev, 1);
892 err = tap_link_set_up(dev);
896 for (i = 0; i < dev->data->nb_tx_queues; i++)
897 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
898 for (i = 0; i < dev->data->nb_rx_queues; i++)
899 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
904 /* This function gets called when the current port gets stopped.
907 tap_dev_stop(struct rte_eth_dev *dev)
911 for (i = 0; i < dev->data->nb_tx_queues; i++)
912 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
913 for (i = 0; i < dev->data->nb_rx_queues; i++)
914 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
916 tap_intr_handle_set(dev, 0);
917 tap_link_set_down(dev);
923 tap_dev_configure(struct rte_eth_dev *dev)
925 struct pmd_internals *pmd = dev->data->dev_private;
927 if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
929 "%s: number of rx queues %d exceeds max num of queues %d",
931 dev->data->nb_rx_queues,
932 RTE_PMD_TAP_MAX_QUEUES);
935 if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
937 "%s: number of tx queues %d exceeds max num of queues %d",
939 dev->data->nb_tx_queues,
940 RTE_PMD_TAP_MAX_QUEUES);
943 if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) {
945 "%s: number of rx queues %d must be equal to number of tx queues %d",
947 dev->data->nb_rx_queues,
948 dev->data->nb_tx_queues);
952 TAP_LOG(INFO, "%s: %s: TX configured queues number: %u",
953 dev->device->name, pmd->name, dev->data->nb_tx_queues);
955 TAP_LOG(INFO, "%s: %s: RX configured queues number: %u",
956 dev->device->name, pmd->name, dev->data->nb_rx_queues);
962 tap_dev_speed_capa(void)
964 uint32_t speed = pmd_link.link_speed;
967 if (speed >= RTE_ETH_SPEED_NUM_10M)
968 capa |= RTE_ETH_LINK_SPEED_10M;
969 if (speed >= RTE_ETH_SPEED_NUM_100M)
970 capa |= RTE_ETH_LINK_SPEED_100M;
971 if (speed >= RTE_ETH_SPEED_NUM_1G)
972 capa |= RTE_ETH_LINK_SPEED_1G;
973 if (speed >= RTE_ETH_SPEED_NUM_2_5G)
974 capa |= RTE_ETH_LINK_SPEED_2_5G;
975 if (speed >= RTE_ETH_SPEED_NUM_5G)
976 capa |= RTE_ETH_LINK_SPEED_5G;
977 if (speed >= RTE_ETH_SPEED_NUM_10G)
978 capa |= RTE_ETH_LINK_SPEED_10G;
979 if (speed >= RTE_ETH_SPEED_NUM_20G)
980 capa |= RTE_ETH_LINK_SPEED_20G;
981 if (speed >= RTE_ETH_SPEED_NUM_25G)
982 capa |= RTE_ETH_LINK_SPEED_25G;
983 if (speed >= RTE_ETH_SPEED_NUM_40G)
984 capa |= RTE_ETH_LINK_SPEED_40G;
985 if (speed >= RTE_ETH_SPEED_NUM_50G)
986 capa |= RTE_ETH_LINK_SPEED_50G;
987 if (speed >= RTE_ETH_SPEED_NUM_56G)
988 capa |= RTE_ETH_LINK_SPEED_56G;
989 if (speed >= RTE_ETH_SPEED_NUM_100G)
990 capa |= RTE_ETH_LINK_SPEED_100G;
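/* Worked example: with the default pmd_link.link_speed of
 * RTE_ETH_SPEED_NUM_10G every branch up to and including the 10G one
 * is taken, so the advertised mask is 10M | 100M | 1G | 2.5G | 5G | 10G.
 */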
996 tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
998 struct pmd_internals *internals = dev->data->dev_private;
1000 dev_info->if_index = internals->if_index;
1001 dev_info->max_mac_addrs = 1;
1002 dev_info->max_rx_pktlen = (uint32_t)RTE_ETHER_MAX_VLAN_FRAME_LEN;
1003 dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
1004 dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
1005 dev_info->min_rx_bufsize = 0;
1006 dev_info->speed_capa = tap_dev_speed_capa();
1007 dev_info->rx_queue_offload_capa = TAP_RX_OFFLOAD;
1008 dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa;
1009 dev_info->tx_queue_offload_capa = TAP_TX_OFFLOAD;
1010 dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa;
1011 dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
1013 * limitation: TAP supports all of IP, UDP and TCP hash
1014 * functions together and not in partial combinations
1016 dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
1017 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
1023 tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
1025 unsigned int i, imax;
1026 unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
1027 unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
1028 unsigned long rx_nombuf = 0, ierrors = 0;
1029 const struct pmd_internals *pmd = dev->data->dev_private;
1031 /* rx queue statistics */
1032 imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1033 dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1034 for (i = 0; i < imax; i++) {
1035 tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
1036 tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
1037 rx_total += tap_stats->q_ipackets[i];
1038 rx_bytes_total += tap_stats->q_ibytes[i];
1039 rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
1040 ierrors += pmd->rxq[i].stats.ierrors;
1043 /* tx queue statistics */
1044 imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1045 dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1047 for (i = 0; i < imax; i++) {
1048 tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
1049 tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
1050 tx_total += tap_stats->q_opackets[i];
1051 tx_err_total += pmd->txq[i].stats.errs;
1052 tx_bytes_total += tap_stats->q_obytes[i];
1055 tap_stats->ipackets = rx_total;
1056 tap_stats->ibytes = rx_bytes_total;
1057 tap_stats->ierrors = ierrors;
1058 tap_stats->rx_nombuf = rx_nombuf;
1059 tap_stats->opackets = tx_total;
1060 tap_stats->oerrors = tx_err_total;
1061 tap_stats->obytes = tx_bytes_total;
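/* Illustrative (hypothetical application code): these totals are read
 * through the standard stats API, e.g.
 *
 *   struct rte_eth_stats st;
 *   rte_eth_stats_get(port_id, &st);
 *   printf("rx %" PRIu64 " tx %" PRIu64 "\n", st.ipackets, st.opackets);
 */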
1066 tap_stats_reset(struct rte_eth_dev *dev)
1069 struct pmd_internals *pmd = dev->data->dev_private;
1071 for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
1072 pmd->rxq[i].stats.ipackets = 0;
1073 pmd->rxq[i].stats.ibytes = 0;
1074 pmd->rxq[i].stats.ierrors = 0;
1075 pmd->rxq[i].stats.rx_nombuf = 0;
1077 pmd->txq[i].stats.opackets = 0;
1078 pmd->txq[i].stats.errs = 0;
1079 pmd->txq[i].stats.obytes = 0;
1086 tap_dev_close(struct rte_eth_dev *dev)
1089 struct pmd_internals *internals = dev->data->dev_private;
1090 struct pmd_process_private *process_private = dev->process_private;
1091 struct rx_queue *rxq;
1093 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1094 rte_free(dev->process_private);
1098 tap_link_set_down(dev);
1099 if (internals->nlsk_fd != -1) {
1100 tap_flow_flush(dev, NULL);
1101 tap_flow_implicit_flush(internals, NULL);
1102 tap_nl_final(internals->nlsk_fd);
1103 internals->nlsk_fd = -1;
1106 for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
1107 if (process_private->rxq_fds[i] != -1) {
1108 rxq = &internals->rxq[i];
1109 close(process_private->rxq_fds[i]);
1110 process_private->rxq_fds[i] = -1;
1111 tap_rxq_pool_free(rxq->pool);
1112 rte_free(rxq->iovecs);
1116 if (process_private->txq_fds[i] != -1) {
1117 close(process_private->txq_fds[i]);
1118 process_private->txq_fds[i] = -1;
1122 if (internals->remote_if_index) {
1123 /* Restore initial remote state */
1124 int ret = ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
1125 &internals->remote_initial_flags);
1127 TAP_LOG(ERR, "restore remote state failed: %d", ret);
1131 rte_mempool_free(internals->gso_ctx_mp);
1132 internals->gso_ctx_mp = NULL;
1134 if (internals->ka_fd != -1) {
1135 close(internals->ka_fd);
1136 internals->ka_fd = -1;
1139 /* mac_addrs must not be freed alone because it is part of dev_private */
1140 dev->data->mac_addrs = NULL;
1142 internals = dev->data->dev_private;
1143 TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
1144 tuntap_types[internals->type], rte_socket_id());
1146 if (internals->ioctl_sock != -1) {
1147 close(internals->ioctl_sock);
1148 internals->ioctl_sock = -1;
1150 rte_free(dev->process_private);
1151 if (tap_devices_count == 1)
1152 rte_mp_action_unregister(TAP_MP_KEY);
1153 tap_devices_count--;
1155 * Since the TUN device no longer has any open file descriptors,
1156 * it will be removed by the kernel
1163 tap_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1165 struct rx_queue *rxq = dev->data->rx_queues[qid];
1166 struct pmd_process_private *process_private;
1170 process_private = rte_eth_devices[rxq->in_port].process_private;
1171 if (process_private->rxq_fds[rxq->queue_id] != -1) {
1172 close(process_private->rxq_fds[rxq->queue_id]);
1173 process_private->rxq_fds[rxq->queue_id] = -1;
1174 tap_rxq_pool_free(rxq->pool);
1175 rte_free(rxq->iovecs);
1182 tap_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1184 struct tx_queue *txq = dev->data->tx_queues[qid];
1185 struct pmd_process_private *process_private;
1189 process_private = rte_eth_devices[txq->out_port].process_private;
1191 if (process_private->txq_fds[txq->queue_id] != -1) {
1192 close(process_private->txq_fds[txq->queue_id]);
1193 process_private->txq_fds[txq->queue_id] = -1;
1198 tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
1200 struct rte_eth_link *dev_link = &dev->data->dev_link;
1201 struct pmd_internals *pmd = dev->data->dev_private;
1202 struct ifreq ifr = { .ifr_flags = 0 };
1204 if (pmd->remote_if_index) {
1205 tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
1206 if (!(ifr.ifr_flags & IFF_UP) ||
1207 !(ifr.ifr_flags & IFF_RUNNING)) {
1208 dev_link->link_status = RTE_ETH_LINK_DOWN;
1212 tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
1213 dev_link->link_status =
1214 ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
1221 tap_promisc_enable(struct rte_eth_dev *dev)
1223 struct pmd_internals *pmd = dev->data->dev_private;
1224 struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
1227 ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1231 if (pmd->remote_if_index && !pmd->flow_isolate) {
1232 dev->data->promiscuous = 1;
1233 ret = tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
1235 /* Rollback promisc flag */
1236 tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1238 * rte_eth_dev_promiscuous_enable() rolls back
1239 * dev->data->promiscuous in the case of failure.
1249 tap_promisc_disable(struct rte_eth_dev *dev)
1251 struct pmd_internals *pmd = dev->data->dev_private;
1252 struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
1255 ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1259 if (pmd->remote_if_index && !pmd->flow_isolate) {
1260 dev->data->promiscuous = 0;
1261 ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
1263 /* Rollback promisc flag */
1264 tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1266 * rte_eth_dev_promiscuous_disable() rolls back
1267 * dev->data->promiscuous in the case of failure.
1277 tap_allmulti_enable(struct rte_eth_dev *dev)
1279 struct pmd_internals *pmd = dev->data->dev_private;
1280 struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
1283 ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1287 if (pmd->remote_if_index && !pmd->flow_isolate) {
1288 dev->data->all_multicast = 1;
1289 ret = tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
1291 /* Rollback allmulti flag */
1292 tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1294 * rte_eth_dev_allmulticast_enable() rolls back
1295 * dev->data->all_multicast in the case of failure.
1305 tap_allmulti_disable(struct rte_eth_dev *dev)
1307 struct pmd_internals *pmd = dev->data->dev_private;
1308 struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
1311 ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1315 if (pmd->remote_if_index && !pmd->flow_isolate) {
1316 dev->data->all_multicast = 0;
1317 ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
1319 /* Rollback allmulti flag */
1320 tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1322 * rte_eth_dev_allmulticast_disable() rolls back
1323 * dev->data->all_multicast in the case of failure.
1333 tap_mac_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1335 struct pmd_internals *pmd = dev->data->dev_private;
1336 enum ioctl_mode mode = LOCAL_ONLY;
1340 if (pmd->type == ETH_TUNTAP_TYPE_TUN) {
1341 TAP_LOG(ERR, "%s: can't set MAC address for TUN",
1346 if (rte_is_zero_ether_addr(mac_addr)) {
1347 TAP_LOG(ERR, "%s: can't set an empty MAC address",
1351 /* Check the actual current MAC address on the tap netdevice */
1352 ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
1355 if (rte_is_same_ether_addr(
1356 (struct rte_ether_addr *)&ifr.ifr_hwaddr.sa_data,
1359 /* Check the current MAC address on the remote */
1360 ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
1363 if (!rte_is_same_ether_addr(
1364 (struct rte_ether_addr *)&ifr.ifr_hwaddr.sa_data,
1366 mode = LOCAL_AND_REMOTE;
1367 ifr.ifr_hwaddr.sa_family = AF_LOCAL;
1368 rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, RTE_ETHER_ADDR_LEN);
1369 ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
1372 rte_memcpy(&pmd->eth_addr, mac_addr, RTE_ETHER_ADDR_LEN);
1373 if (pmd->remote_if_index && !pmd->flow_isolate) {
1374 /* Replace MAC redirection rule after a MAC change */
1375 ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
1378 "%s: Couldn't delete MAC redirection rule",
1382 ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
1385 "%s: Couldn't add MAC redirection rule",
1395 tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
1399 struct pmd_internals *pmd = dev->data->dev_private;
1402 /* initialize GSO context */
1403 gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO;
1404 if (!pmd->gso_ctx_mp) {
1406 * Create a private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
1407 * bytes per mbuf; use this pool for both direct and
1410 ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
1412 if (ret < 0 || ret >= (int)sizeof(pool_name)) {
1414 "%s: failed to create mbuf pool name for device %s,"
1415 "device name too long or output error, ret: %d\n",
1416 pmd->name, dev->device->name, ret);
1417 return -ENAMETOOLONG;
1419 pmd->gso_ctx_mp = rte_pktmbuf_pool_create(pool_name,
1420 TAP_GSO_MBUFS_NUM, TAP_GSO_MBUF_CACHE_SIZE, 0,
1421 RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
1423 if (!pmd->gso_ctx_mp) {
1425 "%s: failed to create mbuf pool for device %s\n",
1426 pmd->name, dev->device->name);
1431 gso_ctx->direct_pool = pmd->gso_ctx_mp;
1432 gso_ctx->indirect_pool = pmd->gso_ctx_mp;
1433 gso_ctx->gso_types = gso_types;
1434 gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
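/* With the constants above, the shared pool created here holds
 * TAP_GSO_MBUFS_NUM == 512 mbufs of RTE_PKTMBUF_HEADROOM + 128 bytes
 * each, and serves as both the direct and indirect pool of the context.
 */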
1441 tap_setup_queue(struct rte_eth_dev *dev,
1442 struct pmd_internals *internals,
1450 struct pmd_internals *pmd = dev->data->dev_private;
1451 struct pmd_process_private *process_private = dev->process_private;
1452 struct rx_queue *rx = &internals->rxq[qid];
1453 struct tx_queue *tx = &internals->txq[qid];
1454 struct rte_gso_ctx *gso_ctx;
1457 fd = &process_private->rxq_fds[qid];
1458 other_fd = &process_private->txq_fds[qid];
1462 fd = &process_private->txq_fds[qid];
1463 other_fd = &process_private->rxq_fds[qid];
1465 gso_ctx = &tx->gso_ctx;
1468 /* fd for this queue already exists */
1469 TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
1470 pmd->name, *fd, dir, qid);
1472 } else if (*other_fd != -1) {
1473 /* Only other_fd exists. dup it */
1474 *fd = dup(*other_fd);
1477 TAP_LOG(ERR, "%s: dup() failed.", pmd->name);
1480 TAP_LOG(DEBUG, "%s: dup fd %d for %s queue qid %d (%d)",
1481 pmd->name, *other_fd, dir, qid, *fd);
1483 /* Neither the RX nor the TX fd exists (both equal -1). Create one */
1484 *fd = tun_alloc(pmd, 0);
1486 *fd = -1; /* restore original value */
1487 TAP_LOG(ERR, "%s: tun_alloc() failed.", pmd->name);
1490 TAP_LOG(DEBUG, "%s: add %s queue for qid %d fd %d",
1491 pmd->name, dir, qid, *fd);
1494 tx->mtu = &dev->data->mtu;
1495 rx->rxmode = &dev->data->dev_conf.rxmode;
1497 ret = tap_gso_ctx_setup(gso_ctx, dev);
1502 tx->type = pmd->type;
1508 tap_rx_queue_setup(struct rte_eth_dev *dev,
1509 uint16_t rx_queue_id,
1510 uint16_t nb_rx_desc,
1511 unsigned int socket_id,
1512 const struct rte_eth_rxconf *rx_conf __rte_unused,
1513 struct rte_mempool *mp)
1515 struct pmd_internals *internals = dev->data->dev_private;
1516 struct pmd_process_private *process_private = dev->process_private;
1517 struct rx_queue *rxq = &internals->rxq[rx_queue_id];
1518 struct rte_mbuf **tmp = &rxq->pool;
1519 long iov_max = sysconf(_SC_IOV_MAX);
1523 "_SC_IOV_MAX is not defined. Using %d as default",
1524 TAP_IOV_DEFAULT_MAX);
1525 iov_max = TAP_IOV_DEFAULT_MAX;
1527 uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
1528 struct iovec (*iovecs)[nb_desc + 1];
1529 int data_off = RTE_PKTMBUF_HEADROOM;
1534 if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
1536 "nb_rx_queues %d too small or mempool NULL",
1537 dev->data->nb_rx_queues);
1542 rxq->trigger_seen = 1; /* force initial burst */
1543 rxq->in_port = dev->data->port_id;
1544 rxq->queue_id = rx_queue_id;
1545 rxq->nb_rx_desc = nb_desc;
1546 iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
1550 "%s: Couldn't allocate %d RX descriptors",
1551 dev->device->name, nb_desc);
1554 rxq->iovecs = iovecs;
1556 dev->data->rx_queues[rx_queue_id] = rxq;
1557 fd = tap_setup_queue(dev, internals, rx_queue_id, 1);
1563 (*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
1564 (*rxq->iovecs)[0].iov_base = &rxq->pi;
1566 for (i = 1; i <= nb_desc; i++) {
1567 *tmp = rte_pktmbuf_alloc(rxq->mp);
1570 "%s: couldn't allocate memory for queue %d",
1571 dev->device->name, rx_queue_id);
1575 (*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
1576 (*rxq->iovecs)[i].iov_base =
1577 (char *)(*tmp)->buf_addr + data_off;
1579 tmp = &(*tmp)->next;
1582 TAP_LOG(DEBUG, " RX TUNTAP device name %s, qid %d on fd %d",
1583 internals->name, rx_queue_id,
1584 process_private->rxq_fds[rx_queue_id]);
1589 tap_rxq_pool_free(rxq->pool);
1591 rte_free(rxq->iovecs);
1597 tap_tx_queue_setup(struct rte_eth_dev *dev,
1598 uint16_t tx_queue_id,
1599 uint16_t nb_tx_desc __rte_unused,
1600 unsigned int socket_id __rte_unused,
1601 const struct rte_eth_txconf *tx_conf)
1603 struct pmd_internals *internals = dev->data->dev_private;
1604 struct pmd_process_private *process_private = dev->process_private;
1605 struct tx_queue *txq;
1609 if (tx_queue_id >= dev->data->nb_tx_queues)
1611 dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
1612 txq = dev->data->tx_queues[tx_queue_id];
1613 txq->out_port = dev->data->port_id;
1614 txq->queue_id = tx_queue_id;
1616 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1617 txq->csum = !!(offloads &
1618 (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1619 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1620 RTE_ETH_TX_OFFLOAD_TCP_CKSUM));
1622 ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
1626 " TX TUNTAP device name %s, qid %d on fd %d csum %s",
1627 internals->name, tx_queue_id,
1628 process_private->txq_fds[tx_queue_id],
1629 txq->csum ? "on" : "off");
1635 tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1637 struct pmd_internals *pmd = dev->data->dev_private;
1638 struct ifreq ifr = { .ifr_mtu = mtu };
1640 return tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
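/* Illustrative (hypothetical application code):
 *
 *   rte_eth_dev_set_mtu(port_id, 1600);
 *
 * lands here and propagates the MTU to the kernel netdevice (and the
 * remote one, if configured) via the SIOCSIFMTU ioctl.
 */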
1644 tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
1645 struct rte_ether_addr *mc_addr_set __rte_unused,
1646 uint32_t nb_mc_addr __rte_unused)
1649 * Nothing to do actually: the tap has no filtering whatsoever, every
1650 * packet is received.
1656 tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
1658 struct rte_eth_dev *dev = arg;
1659 struct pmd_internals *pmd = dev->data->dev_private;
1660 struct ifinfomsg *info = NLMSG_DATA(nh);
1662 if (nh->nlmsg_type != RTM_NEWLINK ||
1663 (info->ifi_index != pmd->if_index &&
1664 info->ifi_index != pmd->remote_if_index))
1666 return tap_link_update(dev, 0);
1670 tap_dev_intr_handler(void *cb_arg)
1672 struct rte_eth_dev *dev = cb_arg;
1673 struct pmd_internals *pmd = dev->data->dev_private;
1675 if (rte_intr_fd_get(pmd->intr_handle) >= 0)
1676 tap_nl_recv(rte_intr_fd_get(pmd->intr_handle),
1677 tap_nl_msg_handler, dev);
1681 tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
1683 struct pmd_internals *pmd = dev->data->dev_private;
1686 /* In any case, disable interrupt if the conf is no longer there. */
1687 if (!dev->data->dev_conf.intr_conf.lsc) {
1688 if (rte_intr_fd_get(pmd->intr_handle) != -1)
1694 rte_intr_fd_set(pmd->intr_handle, tap_nl_init(RTMGRP_LINK));
1695 if (unlikely(rte_intr_fd_get(pmd->intr_handle) == -1))
1697 return rte_intr_callback_register(
1698 pmd->intr_handle, tap_dev_intr_handler, dev);
1703 ret = rte_intr_callback_unregister(pmd->intr_handle,
1704 tap_dev_intr_handler, dev);
1707 } else if (ret == -EAGAIN) {
1710 TAP_LOG(ERR, "intr callback unregister failed: %d",
1716 if (rte_intr_fd_get(pmd->intr_handle) >= 0) {
1717 tap_nl_final(rte_intr_fd_get(pmd->intr_handle));
1718 rte_intr_fd_set(pmd->intr_handle, -1);
1725 tap_intr_handle_set(struct rte_eth_dev *dev, int set)
1729 err = tap_lsc_intr_handle_set(dev, set);
1732 tap_rx_intr_vec_set(dev, 0);
1735 err = tap_rx_intr_vec_set(dev, set);
1737 tap_lsc_intr_handle_set(dev, 0);
1741 static const uint32_t*
1742 tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1744 static const uint32_t ptypes[] = {
1745 RTE_PTYPE_INNER_L2_ETHER,
1746 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1747 RTE_PTYPE_INNER_L2_ETHER_QINQ,
1748 RTE_PTYPE_INNER_L3_IPV4,
1749 RTE_PTYPE_INNER_L3_IPV4_EXT,
1750 RTE_PTYPE_INNER_L3_IPV6,
1751 RTE_PTYPE_INNER_L3_IPV6_EXT,
1752 RTE_PTYPE_INNER_L4_FRAG,
1753 RTE_PTYPE_INNER_L4_UDP,
1754 RTE_PTYPE_INNER_L4_TCP,
1755 RTE_PTYPE_INNER_L4_SCTP,
1757 RTE_PTYPE_L2_ETHER_VLAN,
1758 RTE_PTYPE_L2_ETHER_QINQ,
1760 RTE_PTYPE_L3_IPV4_EXT,
1761 RTE_PTYPE_L3_IPV6_EXT,
1773 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
1774 struct rte_eth_fc_conf *fc_conf)
1776 fc_conf->mode = RTE_ETH_FC_NONE;
1781 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
1782 struct rte_eth_fc_conf *fc_conf)
1784 if (fc_conf->mode != RTE_ETH_FC_NONE)
1790 * DPDK callback to update the RSS hash configuration.
1793 * Pointer to Ethernet device structure.
1794 * @param[in] rss_conf
1795 * RSS configuration data.
1798 * 0 on success, a negative errno value otherwise and rte_errno is set.
1801 tap_rss_hash_update(struct rte_eth_dev *dev,
1802 struct rte_eth_rss_conf *rss_conf)
1804 if (rss_conf->rss_hf & TAP_RSS_HF_MASK) {
1808 if (rss_conf->rss_key && rss_conf->rss_key_len) {
1810 * Currently TAP RSS key is hard coded
1811 * and cannot be updated
1814 "port %u RSS key cannot be updated",
1815 dev->data->port_id);
1823 tap_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1825 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1831 tap_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1833 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1839 tap_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1841 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1847 tap_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1849 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1853 static const struct eth_dev_ops ops = {
1854 .dev_start = tap_dev_start,
1855 .dev_stop = tap_dev_stop,
1856 .dev_close = tap_dev_close,
1857 .dev_configure = tap_dev_configure,
1858 .dev_infos_get = tap_dev_info,
1859 .rx_queue_setup = tap_rx_queue_setup,
1860 .tx_queue_setup = tap_tx_queue_setup,
1861 .rx_queue_start = tap_rx_queue_start,
1862 .tx_queue_start = tap_tx_queue_start,
1863 .rx_queue_stop = tap_rx_queue_stop,
1864 .tx_queue_stop = tap_tx_queue_stop,
1865 .rx_queue_release = tap_rx_queue_release,
1866 .tx_queue_release = tap_tx_queue_release,
1867 .flow_ctrl_get = tap_flow_ctrl_get,
1868 .flow_ctrl_set = tap_flow_ctrl_set,
1869 .link_update = tap_link_update,
1870 .dev_set_link_up = tap_link_set_up,
1871 .dev_set_link_down = tap_link_set_down,
1872 .promiscuous_enable = tap_promisc_enable,
1873 .promiscuous_disable = tap_promisc_disable,
1874 .allmulticast_enable = tap_allmulti_enable,
1875 .allmulticast_disable = tap_allmulti_disable,
1876 .mac_addr_set = tap_mac_set,
1877 .mtu_set = tap_mtu_set,
1878 .set_mc_addr_list = tap_set_mc_addr_list,
1879 .stats_get = tap_stats_get,
1880 .stats_reset = tap_stats_reset,
1881 .dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
1882 .rss_hash_update = tap_rss_hash_update,
1883 .flow_ops_get = tap_dev_flow_ops_get,
1887 eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
1888 char *remote_iface, struct rte_ether_addr *mac_addr,
1889 enum rte_tuntap_type type)
1891 int numa_node = rte_socket_id();
1892 struct rte_eth_dev *dev;
1893 struct pmd_internals *pmd;
1894 struct pmd_process_private *process_private;
1895 const char *tuntap_name = tuntap_types[type];
1896 struct rte_eth_dev_data *data;
1900 TAP_LOG(DEBUG, "%s device on numa %u", tuntap_name, rte_socket_id());
1902 dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
1904 TAP_LOG(ERR, "%s Unable to allocate device struct",
1906 goto error_exit_nodev;
1909 process_private = (struct pmd_process_private *)
1910 rte_zmalloc_socket(tap_name, sizeof(struct pmd_process_private),
1911 RTE_CACHE_LINE_SIZE, dev->device->numa_node);
1913 if (process_private == NULL) {
1914 TAP_LOG(ERR, "Failed to alloc memory for process private");
1917 pmd = dev->data->dev_private;
1918 dev->process_private = process_private;
1920 strlcpy(pmd->name, tap_name, sizeof(pmd->name));
1924 pmd->gso_ctx_mp = NULL;
1926 pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
1927 if (pmd->ioctl_sock == -1) {
1929 "%s Unable to get a socket for management: %s",
1930 tuntap_name, strerror(errno));
1934 /* Allocate interrupt instance */
1935 pmd->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
1936 if (pmd->intr_handle == NULL) {
1937 TAP_LOG(ERR, "Failed to allocate intr handle");
1941 /* Setup some default values */
1943 data->dev_private = pmd;
1944 data->dev_flags = RTE_ETH_DEV_INTR_LSC |
1945 RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1946 data->numa_node = numa_node;
1948 data->dev_link = pmd_link;
1949 data->mac_addrs = &pmd->eth_addr;
1950 /* Set the number of RX and TX queues */
1951 data->nb_rx_queues = 0;
1952 data->nb_tx_queues = 0;
1954 dev->dev_ops = &ops;
1955 dev->rx_pkt_burst = pmd_rx_burst;
1956 dev->tx_pkt_burst = pmd_tx_burst;
1958 rte_intr_type_set(pmd->intr_handle, RTE_INTR_HANDLE_EXT);
1959 rte_intr_fd_set(pmd->intr_handle, -1);
1960 dev->intr_handle = pmd->intr_handle;
1962 /* Preset the fds to -1 to mark them as not valid */
1963 for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
1964 process_private->rxq_fds[i] = -1;
1965 process_private->txq_fds[i] = -1;
1968 if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
1969 if (rte_is_zero_ether_addr(mac_addr))
1970 rte_eth_random_addr((uint8_t *)&pmd->eth_addr);
1972 rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr));
1976 * Allocate a TUN device keep-alive file descriptor that will only be
1977 * closed when the TUN device itself is closed or removed.
1978 * This keep-alive file descriptor will guarantee that the TUN device
1979 * exists even when all of its queues are closed
1981 pmd->ka_fd = tun_alloc(pmd, 1);
1982 if (pmd->ka_fd == -1) {
1983 TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
1986 TAP_LOG(DEBUG, "allocated %s", pmd->name);
1988 ifr.ifr_mtu = dev->data->mtu;
1989 if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
1992 if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
1993 memset(&ifr, 0, sizeof(struct ifreq));
1994 ifr.ifr_hwaddr.sa_family = AF_LOCAL;
1995 rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
1996 RTE_ETHER_ADDR_LEN);
1997 if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
2002 * Set up everything related to rte_flow:
2004 * - tap / remote if_index
2005 * - mandatory QDISCs
2006 * - rte_flow actual/implicit lists
2009 pmd->nlsk_fd = tap_nl_init(0);
2010 if (pmd->nlsk_fd == -1) {
2011 TAP_LOG(WARNING, "%s: failed to create netlink socket.",
2013 goto disable_rte_flow;
2015 pmd->if_index = if_nametoindex(pmd->name);
2016 if (!pmd->if_index) {
2017 TAP_LOG(ERR, "%s: failed to get if_index.", pmd->name);
2018 goto disable_rte_flow;
2020 if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
2021 TAP_LOG(ERR, "%s: failed to create multiq qdisc.",
2023 goto disable_rte_flow;
2025 if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
2026 TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
2028 goto disable_rte_flow;
2030 LIST_INIT(&pmd->flows);
2032 if (strlen(remote_iface)) {
2033 pmd->remote_if_index = if_nametoindex(remote_iface);
2034 if (!pmd->remote_if_index) {
2035 TAP_LOG(ERR, "%s: failed to get %s if_index.",
2036 pmd->name, remote_iface);
2039 strlcpy(pmd->remote_iface, remote_iface, RTE_ETH_NAME_MAX_LEN);
2041 /* Save state of remote device */
2042 tap_ioctl(pmd, SIOCGIFFLAGS, &pmd->remote_initial_flags, 0, REMOTE_ONLY);
2044 /* Replicate remote MAC address */
2045 if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
2046 TAP_LOG(ERR, "%s: failed to get %s MAC address.",
2047 pmd->name, pmd->remote_iface);
2050 rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
2051 RTE_ETHER_ADDR_LEN);
2052 /* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
2053 if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
2054 TAP_LOG(ERR, "%s: failed to set %s MAC address.",
2055 pmd->name, remote_iface);
2060 * Flush usually returns negative value because it tries to
2061 * delete every QDISC (and on a running device, at least one
2062 * QDISC is needed). Ignore the negative return value.
2064 qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
2065 if (qdisc_create_ingress(pmd->nlsk_fd,
2066 pmd->remote_if_index) < 0) {
2067 TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
2071 LIST_INIT(&pmd->implicit_flows);
2072 if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
2073 tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
2074 tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
2075 tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
2077 "%s: failed to create implicit rules.",
2083 rte_eth_dev_probing_finish(dev);
2087 TAP_LOG(ERR, " Disabling rte flow support: %s(%d)",
2088 strerror(errno), errno);
2089 if (strlen(remote_iface)) {
2090 TAP_LOG(ERR, "Remote feature requires flow support.");
2093 rte_eth_dev_probing_finish(dev);
2097 TAP_LOG(ERR, " Can't set up remote feature: %s(%d)",
2098 strerror(errno), errno);
2099 tap_flow_implicit_flush(pmd, NULL);
2102 if (pmd->nlsk_fd != -1)
2103 close(pmd->nlsk_fd);
2104 if (pmd->ka_fd != -1)
2106 if (pmd->ioctl_sock != -1)
2107 close(pmd->ioctl_sock);
2108 /* mac_addrs must not be freed alone because it is part of dev_private */
2109 dev->data->mac_addrs = NULL;
2110 rte_eth_dev_release_port(dev);
2111 rte_intr_instance_free(pmd->intr_handle);
2114 TAP_LOG(ERR, "%s Unable to initialize %s",
2115 tuntap_name, rte_vdev_device_name(vdev));
2120 /* make sure the name is a valid Linux network device name */
2122 is_valid_iface(const char *name)
2127 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
2131 if (*name == '/' || *name == ':' || isspace(*name))
2139 set_interface_name(const char *key __rte_unused,
2143 char *name = (char *)extra_args;
2146 if (!is_valid_iface(value)) {
2147 TAP_LOG(ERR, "TAP invalid remote interface name (%s)",
2151 strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
2153 /* use dtap%d, which causes the kernel to choose the next available index */
2154 strlcpy(name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
2160 set_remote_iface(const char *key __rte_unused,
2164 char *name = (char *)extra_args;
2167 if (!is_valid_iface(value)) {
2168 TAP_LOG(ERR, "TAP invalid remote interface name (%s)",
2172 strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
2178 static int parse_user_mac(struct rte_ether_addr *user_mac,
2181 unsigned int index = 0;
2182 char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL;
2184 if (user_mac == NULL || value == NULL)
2187 strlcpy(mac_temp, value, sizeof(mac_temp));
2188 mac_byte = strtok(mac_temp, ":");
2190 while ((mac_byte != NULL) &&
2191 (strlen(mac_byte) <= 2) &&
2192 (strlen(mac_byte) == strspn(mac_byte,
2193 ETH_TAP_CMP_MAC_FMT))) {
2194 user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16);
2195 mac_byte = strtok(NULL, ":");
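/* Illustrative: for value = "00:11:22:33:44:55" the loop above parses
 * six colon-separated bytes, so the caller's check against 6 in
 * set_mac_type() passes; a malformed byte ends the loop early and the
 * address is rejected.
 */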
2202 set_mac_type(const char *key __rte_unused,
2206 struct rte_ether_addr *user_mac = extra_args;
2211 if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
2212 static int iface_idx;
2214 /* fixed mac = 00:64:74:61:70:<iface_idx> */
2215 memcpy((char *)user_mac->addr_bytes, "\0dtap",
2216 RTE_ETHER_ADDR_LEN);
2217 user_mac->addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
2222 if (parse_user_mac(user_mac, value) != 6)
2225 TAP_LOG(DEBUG, "TAP user MAC param (%s)", value);
2229 TAP_LOG(ERR, "TAP user MAC (%s) is not in format (%s|%s)",
2230 value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT);
2235 * Open a TUN interface device. The TUN PMD
2236 * 1) sets tap_type to false,
2237 * 2) takes iface as an argument,
2238 * 3) sets the speed to 10G, as the interface is virtual.
2241 rte_pmd_tun_probe(struct rte_vdev_device *dev)
2243 const char *name, *params;
2245 struct rte_kvargs *kvlist = NULL;
2246 char tun_name[RTE_ETH_NAME_MAX_LEN];
2247 char remote_iface[RTE_ETH_NAME_MAX_LEN];
2248 struct rte_eth_dev *eth_dev;
2250 name = rte_vdev_device_name(dev);
2251 params = rte_vdev_device_args(dev);
2252 memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
2254 if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
2255 strlen(params) == 0) {
2256 eth_dev = rte_eth_dev_attach_secondary(name);
2258 TAP_LOG(ERR, "Failed to probe %s", name);
2261 eth_dev->dev_ops = &ops;
2262 eth_dev->device = &dev->device;
2263 rte_eth_dev_probing_finish(eth_dev);
2267 /* use dtun%d, which causes the kernel to choose the next available index */
2268 strlcpy(tun_name, DEFAULT_TUN_NAME "%d", RTE_ETH_NAME_MAX_LEN);
2270 if (params && (params[0] != '\0')) {
2271 TAP_LOG(DEBUG, "parameters (%s)", params);
2273 kvlist = rte_kvargs_parse(params, valid_arguments);
2275 if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
2276 ret = rte_kvargs_process(kvlist,
2278 &set_interface_name,
2286 pmd_link.link_speed = RTE_ETH_SPEED_NUM_10G;
2288 TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
2290 ret = eth_dev_tap_create(dev, tun_name, remote_iface, 0,
2291 ETH_TUNTAP_TYPE_TUN);
2295 TAP_LOG(ERR, "Failed to create pmd for %s as %s",
2298 rte_kvargs_free(kvlist);
2303 /* Request queue file descriptors from the primary process (secondary side). */
2305 tap_mp_attach_queues(const char *port_name, struct rte_eth_dev *dev)
2308 struct timespec timeout = {.tv_sec = 1, .tv_nsec = 0};
2309 struct rte_mp_msg request, *reply;
2310 struct rte_mp_reply replies;
2311 struct ipc_queues *request_param = (struct ipc_queues *)request.param;
2312 struct ipc_queues *reply_param;
2313 struct pmd_process_private *process_private = dev->process_private;
2314 int queue, fd_iterator;
2316 /* Prepare the request */
2317 memset(&request, 0, sizeof(request));
2318 strlcpy(request.name, TAP_MP_KEY, sizeof(request.name));
2319 strlcpy(request_param->port_name, port_name,
2320 sizeof(request_param->port_name));
2321 request.len_param = sizeof(*request_param);
2322 /* Send request and receive reply */
2323 ret = rte_mp_request_sync(&request, &replies, &timeout);
2324 if (ret < 0 || replies.nb_received != 1) {
2325 TAP_LOG(ERR, "Failed to request queues from primary: %d",
2329 reply = &replies.msgs[0];
2330 reply_param = (struct ipc_queues *)reply->param;
2331 TAP_LOG(DEBUG, "Received IPC reply for %s", reply_param->port_name);
2333 /* Attach the queues from received file descriptors */
2334 if (reply_param->rxq_count + reply_param->txq_count != reply->num_fds) {
2335 TAP_LOG(ERR, "Unexpected number of fds received");
2339 dev->data->nb_rx_queues = reply_param->rxq_count;
2340 dev->data->nb_tx_queues = reply_param->txq_count;
2342 for (queue = 0; queue < reply_param->rxq_count; queue++)
2343 process_private->rxq_fds[queue] = reply->fds[fd_iterator++];
2344 for (queue = 0; queue < reply_param->txq_count; queue++)
2345 process_private->txq_fds[queue] = reply->fds[fd_iterator++];
2350 /* Send the queue file descriptors from the primary process to the secondary. */
2352 tap_mp_sync_queues(const struct rte_mp_msg *request, const void *peer)
2354 struct rte_eth_dev *dev;
2355 struct pmd_process_private *process_private;
2356 struct rte_mp_msg reply;
2357 const struct ipc_queues *request_param =
2358 (const struct ipc_queues *)request->param;
2359 struct ipc_queues *reply_param =
2360 (struct ipc_queues *)reply.param;
2365 /* Get requested port */
2366 TAP_LOG(DEBUG, "Received IPC request for %s", request_param->port_name);
2367 ret = rte_eth_dev_get_port_by_name(request_param->port_name, &port_id);
2369 TAP_LOG(ERR, "Failed to get port id for %s",
2370 request_param->port_name);
2373 dev = &rte_eth_devices[port_id];
2374 process_private = dev->process_private;
2376 /* Fill file descriptors for all queues */
2378 reply_param->rxq_count = 0;
2379 if (dev->data->nb_rx_queues + dev->data->nb_tx_queues >
2381 TAP_LOG(ERR, "Number of rx/tx queues exceeds max number of fds");
2385 for (queue = 0; queue < dev->data->nb_rx_queues; queue++) {
2386 reply.fds[reply.num_fds++] = process_private->rxq_fds[queue];
2387 reply_param->rxq_count++;
2389 RTE_ASSERT(reply_param->rxq_count == dev->data->nb_rx_queues);
2391 reply_param->txq_count = 0;
2392 for (queue = 0; queue < dev->data->nb_tx_queues; queue++) {
2393 reply.fds[reply.num_fds++] = process_private->txq_fds[queue];
2394 reply_param->txq_count++;
2396 RTE_ASSERT(reply_param->txq_count == dev->data->nb_tx_queues);
2399 strlcpy(reply.name, request->name, sizeof(reply.name));
2400 strlcpy(reply_param->port_name, request_param->port_name,
2401 sizeof(reply_param->port_name));
2402 reply.len_param = sizeof(*reply_param);
2403 if (rte_mp_reply(&reply, peer) < 0) {
2404 TAP_LOG(ERR, "Failed to reply an IPC request to sync queues");
2410 /* Open a TAP interface device.
2413 rte_pmd_tap_probe(struct rte_vdev_device *dev)
2415 const char *name, *params;
2417 struct rte_kvargs *kvlist = NULL;
2419 char tap_name[RTE_ETH_NAME_MAX_LEN];
2420 char remote_iface[RTE_ETH_NAME_MAX_LEN];
2421 struct rte_ether_addr user_mac = { .addr_bytes = {0} };
2422 struct rte_eth_dev *eth_dev;
2423 int tap_devices_count_increased = 0;
2425 name = rte_vdev_device_name(dev);
2426 params = rte_vdev_device_args(dev);
2428 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
2429 eth_dev = rte_eth_dev_attach_secondary(name);
2431 TAP_LOG(ERR, "Failed to probe %s", name);
2434 eth_dev->dev_ops = &ops;
2435 eth_dev->device = &dev->device;
2436 eth_dev->rx_pkt_burst = pmd_rx_burst;
2437 eth_dev->tx_pkt_burst = pmd_tx_burst;
2438 if (!rte_eal_primary_proc_alive(NULL)) {
2439 TAP_LOG(ERR, "Primary process is missing");
2442 eth_dev->process_private = (struct pmd_process_private *)
2443 rte_zmalloc_socket(name,
2444 sizeof(struct pmd_process_private),
2445 RTE_CACHE_LINE_SIZE,
2446 eth_dev->device->numa_node);
2447 if (eth_dev->process_private == NULL) {
2449 "Failed to alloc memory for process private");
2453 ret = tap_mp_attach_queues(name, eth_dev);
2456 rte_eth_dev_probing_finish(eth_dev);
2460 speed = RTE_ETH_SPEED_NUM_10G;
2462 /* use dtap%d, which causes the kernel to choose the next available index */
2463 strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
2464 memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
2466 if (params && (params[0] != '\0')) {
2467 TAP_LOG(DEBUG, "parameters (%s)", params);
2469 kvlist = rte_kvargs_parse(params, valid_arguments);
2471 if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
2472 ret = rte_kvargs_process(kvlist,
2474 &set_interface_name,
2480 if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
2481 ret = rte_kvargs_process(kvlist,
2489 if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
2490 ret = rte_kvargs_process(kvlist,
2499 pmd_link.link_speed = speed;
2501 TAP_LOG(DEBUG, "Initializing pmd_tap for %s", name);
2503 /* Register IPC feed callback */
2504 if (!tap_devices_count) {
2505 ret = rte_mp_action_register(TAP_MP_KEY, tap_mp_sync_queues);
2506 if (ret < 0 && rte_errno != ENOTSUP) {
2507 TAP_LOG(ERR, "tap: Failed to register IPC callback: %s",
2508 strerror(rte_errno));
2512 tap_devices_count++;
2513 tap_devices_count_increased = 1;
2514 ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac,
2515 ETH_TUNTAP_TYPE_TAP);
2519 TAP_LOG(ERR, "Failed to create pmd for %s as %s",
2521 if (tap_devices_count_increased == 1) {
2522 if (tap_devices_count == 1)
2523 rte_mp_action_unregister(TAP_MP_KEY);
2524 tap_devices_count--;
2527 rte_kvargs_free(kvlist);
2532 /* Detach a TUNTAP device.
2535 rte_pmd_tap_remove(struct rte_vdev_device *dev)
2537 struct rte_eth_dev *eth_dev = NULL;
2539 /* find the ethdev entry */
2540 eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
2544 tap_dev_close(eth_dev);
2545 rte_eth_dev_release_port(eth_dev);
2550 static struct rte_vdev_driver pmd_tun_drv = {
2551 .probe = rte_pmd_tun_probe,
2552 .remove = rte_pmd_tap_remove,
2555 static struct rte_vdev_driver pmd_tap_drv = {
2556 .probe = rte_pmd_tap_probe,
2557 .remove = rte_pmd_tap_remove,
2560 RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
2561 RTE_PMD_REGISTER_VDEV(net_tun, pmd_tun_drv);
2562 RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
2563 RTE_PMD_REGISTER_PARAM_STRING(net_tun,
2564 ETH_TAP_IFACE_ARG "=<string> ");
2565 RTE_PMD_REGISTER_PARAM_STRING(net_tap,
2566 ETH_TAP_IFACE_ARG "=<string> "
2567 ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
2568 ETH_TAP_REMOTE_ARG "=<string>");
2569 RTE_LOG_REGISTER_DEFAULT(tap_logtype, NOTICE);
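/*
 * Illustrative usage of the devargs registered above (hypothetical
 * command line):
 *
 *   dpdk-testpmd --vdev=net_tap0,iface=tap0,mac=fixed -- -i
 *
 * creates a TAP netdevice named "tap0" whose MAC address is the fixed
 * 00:64:74:61:70:00 pattern built in set_mac_type().
 */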