mbuf: add rte prefix to offload flags
[dpdk.git] drivers/net/tap/rte_eth_tap.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <rte_atomic.h>
6 #include <rte_branch_prediction.h>
7 #include <rte_byteorder.h>
8 #include <rte_common.h>
9 #include <rte_mbuf.h>
10 #include <ethdev_driver.h>
11 #include <ethdev_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_bus_vdev.h>
14 #include <rte_kvargs.h>
15 #include <rte_net.h>
16 #include <rte_debug.h>
17 #include <rte_ip.h>
18 #include <rte_string_fns.h>
19 #include <rte_ethdev.h>
20 #include <rte_errno.h>
21 #include <rte_cycles.h>
22
23 #include <sys/types.h>
24 #include <sys/stat.h>
25 #include <sys/socket.h>
26 #include <sys/ioctl.h>
27 #include <sys/utsname.h>
28 #include <sys/mman.h>
29 #include <errno.h>
30 #include <signal.h>
31 #include <stdbool.h>
32 #include <stdint.h>
33 #include <sys/uio.h>
34 #include <unistd.h>
35 #include <arpa/inet.h>
36 #include <net/if.h>
37 #include <linux/if_tun.h>
38 #include <linux/if_ether.h>
39 #include <fcntl.h>
40 #include <ctype.h>
41
42 #include <tap_rss.h>
43 #include <rte_eth_tap.h>
44 #include <tap_flow.h>
45 #include <tap_netlink.h>
46 #include <tap_tcmsgs.h>
47
48 /* Linux based path to the TUN device */
49 #define TUN_TAP_DEV_PATH        "/dev/net/tun"
50 #define DEFAULT_TAP_NAME        "dtap"
51 #define DEFAULT_TUN_NAME        "dtun"
52
53 #define ETH_TAP_IFACE_ARG       "iface"
54 #define ETH_TAP_REMOTE_ARG      "remote"
55 #define ETH_TAP_MAC_ARG         "mac"
56 #define ETH_TAP_MAC_FIXED       "fixed"
57
58 #define ETH_TAP_USR_MAC_FMT     "xx:xx:xx:xx:xx:xx"
59 #define ETH_TAP_CMP_MAC_FMT     "0123456789ABCDEFabcdef"
60 #define ETH_TAP_MAC_ARG_FMT     ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT
61
62 #define TAP_GSO_MBUFS_PER_CORE  128
63 #define TAP_GSO_MBUF_SEG_SIZE   128
64 #define TAP_GSO_MBUF_CACHE_SIZE 4
65 #define TAP_GSO_MBUFS_NUM \
66         (TAP_GSO_MBUFS_PER_CORE * TAP_GSO_MBUF_CACHE_SIZE)
67
68 /* IPC key for queue fds sync */
69 #define TAP_MP_KEY "tap_mp_sync_queues"
70
71 #define TAP_IOV_DEFAULT_MAX 1024
72
73 #define TAP_RX_OFFLOAD (DEV_RX_OFFLOAD_SCATTER |        \
74                         DEV_RX_OFFLOAD_IPV4_CKSUM |     \
75                         DEV_RX_OFFLOAD_UDP_CKSUM |      \
76                         DEV_RX_OFFLOAD_TCP_CKSUM)
77
78 #define TAP_TX_OFFLOAD (DEV_TX_OFFLOAD_MULTI_SEGS |     \
79                         DEV_TX_OFFLOAD_IPV4_CKSUM |     \
80                         DEV_TX_OFFLOAD_UDP_CKSUM |      \
81                         DEV_TX_OFFLOAD_TCP_CKSUM |      \
82                         DEV_TX_OFFLOAD_TCP_TSO)
83
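/*
 * A minimal sketch (not part of this file) of how an application might
 * request a subset of the offloads advertised above when configuring a tap
 * port. The function name, port id, descriptor counts and mempool are
 * illustrative assumptions, not taken from this driver.
 */
#if 0
static int
example_tap_port_setup(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
	conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			       DEV_TX_OFFLOAD_TCP_CKSUM;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;
	ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
				     NULL, mb_pool);
	if (ret < 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL);
	if (ret < 0)
		return ret;
	return rte_eth_dev_start(port_id);
}
#endif
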
84 static int tap_devices_count;
85
86 static const char *tuntap_types[ETH_TUNTAP_TYPE_MAX] = {
87         "UNKNOWN", "TUN", "TAP"
88 };
89
90 static const char *valid_arguments[] = {
91         ETH_TAP_IFACE_ARG,
92         ETH_TAP_REMOTE_ARG,
93         ETH_TAP_MAC_ARG,
94         NULL
95 };
96
97 static volatile uint32_t tap_trigger;   /* Rx trigger */
98
99 static struct rte_eth_link pmd_link = {
100         .link_speed = ETH_SPEED_NUM_10G,
101         .link_duplex = ETH_LINK_FULL_DUPLEX,
102         .link_status = ETH_LINK_DOWN,
103         .link_autoneg = ETH_LINK_FIXED,
104 };
105
106 static void
107 tap_trigger_cb(int sig __rte_unused)
108 {
109         /* Valid trigger values are nonzero */
110         tap_trigger = (tap_trigger + 1) | 0x80000000;
111 }
112
113 /* Specifies on what netdevices the ioctl should be applied */
114 enum ioctl_mode {
115         LOCAL_AND_REMOTE,
116         LOCAL_ONLY,
117         REMOTE_ONLY,
118 };
119
120 /* Message header to synchronize queues via IPC */
121 struct ipc_queues {
122         char port_name[RTE_DEV_NAME_MAX_LEN];
123         int rxq_count;
124         int txq_count;
125         /*
126          * The file descriptors are in the dedicated part
127          * of the Unix message to be translated by the kernel.
128          */
129 };
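/*
 * A minimal sketch, assuming one rx and one tx queue, of how a primary
 * process could send this header over the DPDK multi-process channel with
 * the queue fds attached, so the kernel translates them for the peer
 * process. The port name and fd variables are illustrative assumptions.
 */
#if 0
	struct rte_mp_msg msg;
	struct ipc_queues *request = (struct ipc_queues *)msg.param;
	int rxq_fd = -1, txq_fd = -1;	/* queue fds obtained from tun_alloc() */

	memset(&msg, 0, sizeof(msg));
	strlcpy(msg.name, TAP_MP_KEY, sizeof(msg.name));
	strlcpy(request->port_name, "net_tap0", sizeof(request->port_name));
	request->rxq_count = 1;
	request->txq_count = 1;
	msg.len_param = sizeof(*request);
	msg.fds[0] = rxq_fd;
	msg.fds[1] = txq_fd;
	msg.num_fds = 2;
	rte_mp_sendmsg(&msg);
#endif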
130
131 static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
132
133 /**
134  * Tun/Tap allocation routine
135  *
136  * @param[in] pmd
137  *   Pointer to private structure.
138  *
139  * @param[in] is_keepalive
140  *   Keepalive flag
141  *
142  * @return
143  *   -1 on failure, fd on success
144  */
145 static int
146 tun_alloc(struct pmd_internals *pmd, int is_keepalive)
147 {
148         struct ifreq ifr;
149 #ifdef IFF_MULTI_QUEUE
150         unsigned int features;
151 #endif
152         int fd, signo, flags;
153
154         memset(&ifr, 0, sizeof(struct ifreq));
155
156         /*
157          * Do not set IFF_NO_PI, as the packet information header is needed
158          * to check if a received packet has been truncated.
159          */
160         ifr.ifr_flags = (pmd->type == ETH_TUNTAP_TYPE_TAP) ?
161                 IFF_TAP : IFF_TUN | IFF_POINTOPOINT;
162         strlcpy(ifr.ifr_name, pmd->name, IFNAMSIZ);
163
164         fd = open(TUN_TAP_DEV_PATH, O_RDWR);
165         if (fd < 0) {
166                 TAP_LOG(ERR, "Unable to open %s interface", TUN_TAP_DEV_PATH);
167                 goto error;
168         }
169
170 #ifdef IFF_MULTI_QUEUE
171         /* Grab the TUN features to verify we can work multi-queue */
172         if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
173                 TAP_LOG(ERR, "unable to get TUN/TAP features");
174                 goto error;
175         }
176         TAP_LOG(DEBUG, "%s Features %08x", TUN_TAP_DEV_PATH, features);
177
178         if (features & IFF_MULTI_QUEUE) {
179                 TAP_LOG(DEBUG, "  Multi-queue support for %d queues",
180                         RTE_PMD_TAP_MAX_QUEUES);
181                 ifr.ifr_flags |= IFF_MULTI_QUEUE;
182         } else
183 #endif
184         {
185                 ifr.ifr_flags |= IFF_ONE_QUEUE;
186                 TAP_LOG(DEBUG, "  Single queue only support");
187         }
188
189         /* Set the TUN/TAP configuration and set the name if needed */
190         if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
191                 TAP_LOG(WARNING, "Unable to set TUNSETIFF for %s: %s",
192                         ifr.ifr_name, strerror(errno));
193                 goto error;
194         }
195
196         /*
197          * The name passed to the kernel may be a wildcard like dtun%d,
198          * so read back the resulting device name.
199          */
200         TAP_LOG(DEBUG, "Device name is '%s'", ifr.ifr_name);
201         strlcpy(pmd->name, ifr.ifr_name, RTE_ETH_NAME_MAX_LEN);
202
203         if (is_keepalive) {
204                 /*
205                  * Detach the TUN/TAP keep-alive queue
206                  * to avoid traffic through it
207                  */
208                 ifr.ifr_flags = IFF_DETACH_QUEUE;
209                 if (ioctl(fd, TUNSETQUEUE, (void *)&ifr) < 0) {
210                         TAP_LOG(WARNING,
211                                 "Unable to detach keep-alive queue for %s: %s",
212                                 ifr.ifr_name, strerror(errno));
213                         goto error;
214                 }
215         }
216
217         flags = fcntl(fd, F_GETFL);
218         if (flags == -1) {
219                 TAP_LOG(WARNING,
220                         "Unable to get %s current flags\n",
221                         ifr.ifr_name);
222                 goto error;
223         }
224
225         /* Always set the file descriptor to non-blocking */
226         flags |= O_NONBLOCK;
227         if (fcntl(fd, F_SETFL, flags) < 0) {
228                 TAP_LOG(WARNING,
229                         "Unable to set %s to nonblocking: %s",
230                         ifr.ifr_name, strerror(errno));
231                 goto error;
232         }
233
234         /* Find a free realtime signal */
235         for (signo = SIGRTMIN + 1; signo < SIGRTMAX; signo++) {
236                 struct sigaction sa;
237
238                 if (sigaction(signo, NULL, &sa) == -1) {
239                         TAP_LOG(WARNING,
240                                 "Unable to get current rt-signal %d handler",
241                                 signo);
242                         goto error;
243                 }
244
245                 /* Already have the handler we want on this signal  */
246                 if (sa.sa_handler == tap_trigger_cb)
247                         break;
248
249                 /* Is handler in use by application */
250                 if (sa.sa_handler != SIG_DFL) {
251                         TAP_LOG(DEBUG,
252                                 "Skipping used rt-signal %d", signo);
253                         continue;
254                 }
255
256                 sa = (struct sigaction) {
257                         .sa_flags = SA_RESTART,
258                         .sa_handler = tap_trigger_cb,
259                 };
260
261                 if (sigaction(signo, &sa, NULL) == -1) {
262                         TAP_LOG(WARNING,
263                                 "Unable to set rt-signal %d handler\n", signo);
264                         goto error;
265                 }
266
267                 /* Found a good signal to use */
268                 TAP_LOG(DEBUG,
269                         "Using rt-signal %d", signo);
270                 break;
271         }
272
273         if (signo == SIGRTMAX) {
274                 TAP_LOG(WARNING, "All rt-signals are in use\n");
275
276                 /* Disable trigger globally in case of error */
277                 tap_trigger = 0;
278                 TAP_LOG(NOTICE, "No Rx trigger signal available\n");
279         } else {
280                 /* Enable signal on file descriptor */
281                 if (fcntl(fd, F_SETSIG, signo) < 0) {
282                         TAP_LOG(WARNING, "Unable to set signo %d for fd %d: %s",
283                                 signo, fd, strerror(errno));
284                         goto error;
285                 }
286                 if (fcntl(fd, F_SETFL, flags | O_ASYNC) < 0) {
287                         TAP_LOG(WARNING, "Unable to set fcntl flags: %s",
288                                 strerror(errno));
289                         goto error;
290                 }
291
292                 if (fcntl(fd, F_SETOWN, getpid()) < 0) {
293                         TAP_LOG(WARNING, "Unable to set fcntl owner: %s",
294                                 strerror(errno));
295                         goto error;
296                 }
297         }
298         return fd;
299
300 error:
301         if (fd >= 0)
302                 close(fd);
303         return -1;
304 }
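/*
 * A minimal, self-contained sketch of the Linux signal-driven I/O pattern
 * armed above: the kernel raises the chosen real-time signal whenever the
 * fd becomes readable, and the handler only flips a flag that the polling
 * loop then checks. All names are illustrative; F_SETSIG needs _GNU_SOURCE.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t data_ready;

static void
example_on_ready(int sig)
{
	(void)sig;
	data_ready = 1;
}

static int
example_arm_async_read(int fd, int signo)
{
	struct sigaction sa = {
		.sa_flags = SA_RESTART,
		.sa_handler = example_on_ready,
	};

	if (sigaction(signo, &sa, NULL) == -1)
		return -1;
	if (fcntl(fd, F_SETSIG, signo) == -1)		/* signal to raise */
		return -1;
	if (fcntl(fd, F_SETOWN, getpid()) == -1)	/* deliver to this pid */
		return -1;
	/* O_ASYNC makes the fd generate the signal when it becomes readable */
	return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC | O_NONBLOCK);
}
#endif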
305
306 static void
307 tap_verify_csum(struct rte_mbuf *mbuf)
308 {
309         uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
310         uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
311         uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
312         unsigned int l2_len = sizeof(struct rte_ether_hdr);
313         unsigned int l3_len;
314         uint16_t cksum = 0;
315         void *l3_hdr;
316         void *l4_hdr;
317         struct rte_udp_hdr *udp_hdr;
318
319         if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
320                 l2_len += 4;
321         else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
322                 l2_len += 8;
323         /* Don't verify checksum for packets with discontinuous L2 header */
324         if (unlikely(l2_len + sizeof(struct rte_ipv4_hdr) >
325                      rte_pktmbuf_data_len(mbuf)))
326                 return;
327         l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
328         if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
329                 struct rte_ipv4_hdr *iph = l3_hdr;
330
331                 l3_len = rte_ipv4_hdr_len(iph);
332                 if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
333                         return;
334                 /* check that the total length reported by header is not
335                  * greater than the total received size
336                  */
337                 if (l2_len + rte_be_to_cpu_16(iph->total_length) >
338                                 rte_pktmbuf_data_len(mbuf))
339                         return;
340
341                 cksum = ~rte_raw_cksum(iph, l3_len);
342                 mbuf->ol_flags |= cksum ?
343                         RTE_MBUF_F_RX_IP_CKSUM_BAD :
344                         RTE_MBUF_F_RX_IP_CKSUM_GOOD;
345         } else if (l3 == RTE_PTYPE_L3_IPV6) {
346                 struct rte_ipv6_hdr *iph = l3_hdr;
347
348                 l3_len = sizeof(struct rte_ipv6_hdr);
349                 /* check that the total length reported by header is not
350                  * greater than the total received size
351                  */
352                 if (l2_len + l3_len + rte_be_to_cpu_16(iph->payload_len) >
353                                 rte_pktmbuf_data_len(mbuf))
354                         return;
355         } else {
356                 /* - RTE_PTYPE_L3_IPV4_EXT_UNKNOWN cannot happen because
357                  *   mbuf->packet_type is filled by rte_net_get_ptype() which
358                  *   never returns this value.
359                  * - IPv6 extensions are not supported.
360                  */
361                 return;
362         }
363         if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
364                 int cksum_ok;
365
366                 l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len);
367                 /* Don't verify checksum for multi-segment packets. */
368                 if (mbuf->nb_segs > 1)
369                         return;
370                 if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
371                         if (l4 == RTE_PTYPE_L4_UDP) {
372                                 udp_hdr = (struct rte_udp_hdr *)l4_hdr;
373                                 if (udp_hdr->dgram_cksum == 0) {
374                                         /*
375                                          * For IPv4, a zero UDP checksum
376                                          * indicates that the sender did not
377                                          * generate one [RFC 768].
378                                          */
379                                         mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
380                                         return;
381                                 }
382                         }
383                         cksum_ok = !rte_ipv4_udptcp_cksum_verify(l3_hdr,
384                                                                  l4_hdr);
385                 } else { /* l3 == RTE_PTYPE_L3_IPV6, checked above */
386                         cksum_ok = !rte_ipv6_udptcp_cksum_verify(l3_hdr,
387                                                                  l4_hdr);
388                 }
389                 mbuf->ol_flags |= cksum_ok ?
390                         RTE_MBUF_F_RX_L4_CKSUM_GOOD : RTE_MBUF_F_RX_L4_CKSUM_BAD;
391         }
392 }
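/*
 * A minimal sketch of how an application receiving from this PMD could act
 * on the flags set above; handle_packet() is a hypothetical handler, not
 * part of this driver.
 */
#if 0
	if ((mbuf->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
				RTE_MBUF_F_RX_IP_CKSUM_BAD ||
	    (mbuf->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
				RTE_MBUF_F_RX_L4_CKSUM_BAD)
		rte_pktmbuf_free(mbuf);	/* drop packets with bad checksums */
	else
		handle_packet(mbuf);
#endif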
393
394 static void
395 tap_rxq_pool_free(struct rte_mbuf *pool)
396 {
397         struct rte_mbuf *mbuf = pool;
398         uint16_t nb_segs = 1;
399
400         if (mbuf == NULL)
401                 return;
402
403         while (mbuf->next) {
404                 mbuf = mbuf->next;
405                 nb_segs++;
406         }
407         pool->nb_segs = nb_segs;
408         rte_pktmbuf_free(pool);
409 }
410
411 /* Callback to handle the rx burst of packets to the correct interface and
412  * file descriptor(s) in a multi-queue setup.
413  */
414 static uint16_t
415 pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
416 {
417         struct rx_queue *rxq = queue;
418         struct pmd_process_private *process_private;
419         uint16_t num_rx;
420         unsigned long num_rx_bytes = 0;
421         uint32_t trigger = tap_trigger;
422
423         if (trigger == rxq->trigger_seen)
424                 return 0;
425
426         process_private = rte_eth_devices[rxq->in_port].process_private;
427         for (num_rx = 0; num_rx < nb_pkts; ) {
428                 struct rte_mbuf *mbuf = rxq->pool;
429                 struct rte_mbuf *seg = NULL;
430                 struct rte_mbuf *new_tail = NULL;
431                 uint16_t data_off = rte_pktmbuf_headroom(mbuf);
432                 int len;
433
434                 len = readv(process_private->rxq_fds[rxq->queue_id],
435                         *rxq->iovecs,
436                         1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
437                              rxq->nb_rx_desc : 1));
438                 if (len < (int)sizeof(struct tun_pi))
439                         break;
440
441                 /* Packet couldn't fit in the provided mbuf */
442                 if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
443                         rxq->stats.ierrors++;
444                         continue;
445                 }
446
447                 len -= sizeof(struct tun_pi);
448
449                 mbuf->pkt_len = len;
450                 mbuf->port = rxq->in_port;
451                 while (1) {
452                         struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);
453
454                         if (unlikely(!buf)) {
455                                 rxq->stats.rx_nombuf++;
456                                 /* No new buf has been allocated: do nothing */
457                                 if (!new_tail || !seg)
458                                         goto end;
459
460                                 seg->next = NULL;
461                                 tap_rxq_pool_free(mbuf);
462
463                                 goto end;
464                         }
465                         seg = seg ? seg->next : mbuf;
466                         if (rxq->pool == mbuf)
467                                 rxq->pool = buf;
468                         if (new_tail)
469                                 new_tail->next = buf;
470                         new_tail = buf;
471                         new_tail->next = seg->next;
472
473                         /* iovecs[0] is reserved for packet info (pi) */
474                         (*rxq->iovecs)[mbuf->nb_segs].iov_len =
475                                 buf->buf_len - data_off;
476                         (*rxq->iovecs)[mbuf->nb_segs].iov_base =
477                                 (char *)buf->buf_addr + data_off;
478
479                         seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
480                         seg->data_off = data_off;
481
482                         len -= seg->data_len;
483                         if (len <= 0)
484                                 break;
485                         mbuf->nb_segs++;
486                         /* First segment has headroom, not the others */
487                         data_off = 0;
488                 }
489                 seg->next = NULL;
490                 mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
491                                                       RTE_PTYPE_ALL_MASK);
492                 if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
493                         tap_verify_csum(mbuf);
494
495                 /* account for the receive frame */
496                 bufs[num_rx++] = mbuf;
497                 num_rx_bytes += mbuf->pkt_len;
498         }
499 end:
500         rxq->stats.ipackets += num_rx;
501         rxq->stats.ibytes += num_rx_bytes;
502
503         if (trigger && num_rx < nb_pkts)
504                 rxq->trigger_seen = trigger;
505
506         return num_rx;
507 }
508
509 /* Finalize l4 checksum calculation */
510 static void
511 tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
512                 uint32_t l4_raw_cksum)
513 {
514         if (l4_cksum) {
515                 uint32_t cksum;
516
517                 cksum = __rte_raw_cksum_reduce(l4_raw_cksum);
518                 cksum += l4_phdr_cksum;
519
520                 cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
521                 cksum = (~cksum) & 0xffff;
522                 if (cksum == 0)
523                         cksum = 0xffff;
524                 *l4_cksum = cksum;
525         }
526 }
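/*
 * Worked example of the fold above with illustrative values: if
 * __rte_raw_cksum_reduce(l4_raw_cksum) yields 0xe0f4 and l4_phdr_cksum is
 * 0x1a2b, then cksum = 0xe0f4 + 0x1a2b = 0xfb1f (no carry to fold back),
 * and ~0xfb1f & 0xffff = 0x04e0 is stored. A result of 0 would be written
 * as 0xffff, since 0 means "no checksum was generated" for UDP (RFC 768).
 */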
527
528 /* Accumulate L4 raw checksums */
529 static void
530 tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
531                         uint32_t *l4_raw_cksum)
532 {
533         if (l4_cksum == NULL)
534                 return;
535
536         *l4_raw_cksum = __rte_raw_cksum(l4_data, l4_len, *l4_raw_cksum);
537 }
538
539 /* Compute the L3 checksum and prepare the L4 pseudo-header checksum offloads */
540 static void
541 tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
542                 unsigned int l3_len, unsigned int l4_len, uint16_t **l4_cksum,
543                 uint16_t *l4_phdr_cksum, uint32_t *l4_raw_cksum)
544 {
545         void *l3_hdr = packet + l2_len;
546
547         if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
548                 struct rte_ipv4_hdr *iph = l3_hdr;
549                 uint16_t cksum;
550
551                 iph->hdr_checksum = 0;
552                 cksum = rte_raw_cksum(iph, l3_len);
553                 iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
554         }
555         if (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
556                 void *l4_hdr;
557
558                 l4_hdr = packet + l2_len + l3_len;
559                 if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
560                         *l4_cksum = &((struct rte_udp_hdr *)l4_hdr)->dgram_cksum;
561                 else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM)
562                         *l4_cksum = &((struct rte_tcp_hdr *)l4_hdr)->cksum;
563                 else
564                         return;
565                 **l4_cksum = 0;
566                 if (ol_flags & RTE_MBUF_F_TX_IPV4)
567                         *l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
568                 else
569                         *l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
570                 *l4_raw_cksum = __rte_raw_cksum(l4_hdr, l4_len, 0);
571         }
572 }
573
574 static inline int
575 tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
576                         struct rte_mbuf **pmbufs,
577                         uint16_t *num_packets, unsigned long *num_tx_bytes)
578 {
579         int i;
580         uint16_t l234_hlen;
581         struct pmd_process_private *process_private;
582
583         process_private = rte_eth_devices[txq->out_port].process_private;
584
585         for (i = 0; i < num_mbufs; i++) {
586                 struct rte_mbuf *mbuf = pmbufs[i];
587                 struct iovec iovecs[mbuf->nb_segs + 2];
588                 struct tun_pi pi = { .flags = 0, .proto = 0x00 };
589                 struct rte_mbuf *seg = mbuf;
590                 char m_copy[mbuf->data_len];
591                 int proto;
592                 int n;
593                 int j;
594                 int k; /* current index in iovecs for copying segments */
595                 uint16_t seg_len; /* length of first segment */
596                 uint16_t nb_segs;
597                 uint16_t *l4_cksum; /* l4 checksum (pseudo header + payload) */
598                 uint32_t l4_raw_cksum = 0; /* TCP/UDP payload raw checksum */
599                 uint16_t l4_phdr_cksum = 0; /* TCP/UDP pseudo header checksum */
600                 uint16_t is_cksum = 0; /* in case cksum should be offloaded */
601
602                 l4_cksum = NULL;
603                 if (txq->type == ETH_TUNTAP_TYPE_TUN) {
604                         /*
605                          * TUN and TAP are created with IFF_NO_PI disabled.
606                          * For the TUN PMD this is mandatory, as the kernel's
607                          * tun.c uses these fields to determine whether the
608                          * packet is IP or non-IP.
609                          *
610                          * The logic fetches the first byte of data from the
611                          * mbuf and checks the IP version nibble: if it is 4
612                          * or 6, the protocol field is set accordingly.
613                          */
614                         char *buff_data = rte_pktmbuf_mtod(seg, void *);
615                         proto = (*buff_data & 0xf0);
616                         pi.proto = (proto == 0x40) ?
617                                 rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) :
618                                 ((proto == 0x60) ?
619                                         rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6) :
620                                         0x00);
621                 }
622
623                 k = 0;
624                 iovecs[k].iov_base = &pi;
625                 iovecs[k].iov_len = sizeof(pi);
626                 k++;
627
628                 nb_segs = mbuf->nb_segs;
629                 if (txq->csum &&
630                     ((mbuf->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4) ||
631                       (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM ||
632                       (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM))) {
633                         is_cksum = 1;
634
635                         /* Support only packets with at least layer 4
636                          * header included in the first segment
637                          */
638                         seg_len = rte_pktmbuf_data_len(mbuf);
639                         l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
640                         if (seg_len < l234_hlen)
641                                 return -1;
642
643                         /* To change checksums, work on a copy of the l2 and
644                          * l3 headers plus the l4 pseudo header
645                          */
646                         rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
647                                         l234_hlen);
648                         tap_tx_l3_cksum(m_copy, mbuf->ol_flags,
649                                        mbuf->l2_len, mbuf->l3_len, mbuf->l4_len,
650                                        &l4_cksum, &l4_phdr_cksum,
651                                        &l4_raw_cksum);
652                         iovecs[k].iov_base = m_copy;
653                         iovecs[k].iov_len = l234_hlen;
654                         k++;
655
656                         /* Update next iovecs[] beyond l2, l3, l4 headers */
657                         if (seg_len > l234_hlen) {
658                                 iovecs[k].iov_len = seg_len - l234_hlen;
659                                 iovecs[k].iov_base =
660                                         rte_pktmbuf_mtod(seg, char *) +
661                                                 l234_hlen;
662                                 tap_tx_l4_add_rcksum(iovecs[k].iov_base,
663                                         iovecs[k].iov_len, l4_cksum,
664                                         &l4_raw_cksum);
665                                 k++;
666                                 nb_segs++;
667                         }
668                         seg = seg->next;
669                 }
670
671                 for (j = k; j <= nb_segs; j++) {
672                         iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
673                         iovecs[j].iov_base = rte_pktmbuf_mtod(seg, void *);
674                         if (is_cksum)
675                                 tap_tx_l4_add_rcksum(iovecs[j].iov_base,
676                                         iovecs[j].iov_len, l4_cksum,
677                                         &l4_raw_cksum);
678                         seg = seg->next;
679                 }
680
681                 if (is_cksum)
682                         tap_tx_l4_cksum(l4_cksum, l4_phdr_cksum, l4_raw_cksum);
683
684                 /* copy the tx frame data */
685                 n = writev(process_private->txq_fds[txq->queue_id], iovecs, j);
686                 if (n <= 0)
687                         return -1;
688
689                 (*num_packets)++;
690                 (*num_tx_bytes) += rte_pktmbuf_pkt_len(mbuf);
691         }
692         return 0;
693 }
694
695 /* Callback to handle sending packets from the tap interface
696  */
697 static uint16_t
698 pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
699 {
700         struct tx_queue *txq = queue;
701         uint16_t num_tx = 0;
702         uint16_t num_packets = 0;
703         unsigned long num_tx_bytes = 0;
704         uint32_t max_size;
705         int i;
706
707         if (unlikely(nb_pkts == 0))
708                 return 0;
709
710         struct rte_mbuf *gso_mbufs[MAX_GSO_MBUFS];
711         max_size = *txq->mtu + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4);
712         for (i = 0; i < nb_pkts; i++) {
713                 struct rte_mbuf *mbuf_in = bufs[num_tx];
714                 struct rte_mbuf **mbuf;
715                 uint16_t num_mbufs = 0;
716                 uint16_t tso_segsz = 0;
717                 int ret;
718                 int num_tso_mbufs;
719                 uint16_t hdrs_len;
720                 uint64_t tso;
721
722                 tso = mbuf_in->ol_flags & RTE_MBUF_F_TX_TCP_SEG;
723                 if (tso) {
724                         struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;
725
726                         /* TCP segmentation implies TCP checksum offload */
727                         mbuf_in->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
728
729                         /* gso size is calculated without RTE_ETHER_CRC_LEN */
730                         hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
731                                         mbuf_in->l4_len;
732                         tso_segsz = mbuf_in->tso_segsz + hdrs_len;
733                         if (unlikely(tso_segsz == hdrs_len) ||
734                                 tso_segsz > *txq->mtu) {
735                                 txq->stats.errs++;
736                                 break;
737                         }
738                         gso_ctx->gso_size = tso_segsz;
739                         /* 'mbuf_in' packet to segment */
740                         num_tso_mbufs = rte_gso_segment(mbuf_in,
741                                 gso_ctx, /* gso control block */
742                                 (struct rte_mbuf **)&gso_mbufs, /* out mbufs */
743                                 RTE_DIM(gso_mbufs)); /* max tso mbufs */
744
745                         /* num_tso_mbufs holds the number of newly created mbufs */
746                         if (num_tso_mbufs < 0)
747                                 break;
748
749                         if (num_tso_mbufs >= 1) {
750                                 mbuf = gso_mbufs;
751                                 num_mbufs = num_tso_mbufs;
752                         } else {
753                                 /* 0 means it can be transmitted directly
754                                  * without gso.
755                                  */
756                                 mbuf = &mbuf_in;
757                                 num_mbufs = 1;
758                         }
759                 } else {
760                         /* oversized packet: stop here, stats.errs is updated after the loop */
761                         if (rte_pktmbuf_pkt_len(mbuf_in) > max_size)
762                                 break;
763
764                         /* ret 0 indicates no new mbufs were created */
765                         num_tso_mbufs = 0;
766                         mbuf = &mbuf_in;
767                         num_mbufs = 1;
768                 }
769
770                 ret = tap_write_mbufs(txq, num_mbufs, mbuf,
771                                 &num_packets, &num_tx_bytes);
772                 if (ret == -1) {
773                         txq->stats.errs++;
774                         /* free tso mbufs */
775                         if (num_tso_mbufs > 0)
776                                 rte_pktmbuf_free_bulk(mbuf, num_tso_mbufs);
777                         break;
778                 }
779                 num_tx++;
780                 /* free original mbuf */
781                 rte_pktmbuf_free(mbuf_in);
782                 /* free tso mbufs */
783                 if (num_tso_mbufs > 0)
784                         rte_pktmbuf_free_bulk(mbuf, num_tso_mbufs);
785         }
786
787         txq->stats.opackets += num_packets;
788         txq->stats.errs += nb_pkts - num_tx;
789         txq->stats.obytes += num_tx_bytes;
790
791         return num_tx;
792 }
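/*
 * A minimal sketch of the mbuf fields an application fills before passing
 * an IPv4/TCP packet to this burst function with TSO requested; the header
 * lengths and MSS below are illustrative assumptions.
 */
#if 0
	mbuf->l2_len = sizeof(struct rte_ether_hdr);
	mbuf->l3_len = sizeof(struct rte_ipv4_hdr);
	mbuf->l4_len = sizeof(struct rte_tcp_hdr);
	mbuf->tso_segsz = 1400;		/* payload bytes per output segment */
	mbuf->ol_flags |= RTE_MBUF_F_TX_TCP_SEG |
			  RTE_MBUF_F_TX_IPV4 |
			  RTE_MBUF_F_TX_IP_CKSUM;
	/* pmd_tx_burst() then adds RTE_MBUF_F_TX_TCP_CKSUM and segments the
	 * packet through the GSO context set up in tap_gso_ctx_setup().
	 */
#endif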
793
794 static const char *
795 tap_ioctl_req2str(unsigned long request)
796 {
797         switch (request) {
798         case SIOCSIFFLAGS:
799                 return "SIOCSIFFLAGS";
800         case SIOCGIFFLAGS:
801                 return "SIOCGIFFLAGS";
802         case SIOCGIFHWADDR:
803                 return "SIOCGIFHWADDR";
804         case SIOCSIFHWADDR:
805                 return "SIOCSIFHWADDR";
806         case SIOCSIFMTU:
807                 return "SIOCSIFMTU";
808         }
809         return "UNKNOWN";
810 }
811
812 static int
813 tap_ioctl(struct pmd_internals *pmd, unsigned long request,
814           struct ifreq *ifr, int set, enum ioctl_mode mode)
815 {
816         short req_flags = ifr->ifr_flags;
817         int remote = pmd->remote_if_index &&
818                 (mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);
819
820         if (!pmd->remote_if_index && mode == REMOTE_ONLY)
821                 return 0;
822         /*
823          * If there is a remote netdevice, apply ioctl on it, then apply it on
824          * the tap netdevice.
825          */
826 apply:
827         if (remote)
828                 strlcpy(ifr->ifr_name, pmd->remote_iface, IFNAMSIZ);
829         else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
830                 strlcpy(ifr->ifr_name, pmd->name, IFNAMSIZ);
831         switch (request) {
832         case SIOCSIFFLAGS:
833                 /* fetch current flags to leave other flags untouched */
834                 if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
835                         goto error;
836                 if (set)
837                         ifr->ifr_flags |= req_flags;
838                 else
839                         ifr->ifr_flags &= ~req_flags;
840                 break;
841         case SIOCGIFFLAGS:
842         case SIOCGIFHWADDR:
843         case SIOCSIFHWADDR:
844         case SIOCSIFMTU:
845                 break;
846         default:
847                 TAP_LOG(WARNING, "%s: ioctl() called with wrong arg",
848                         pmd->name);
849                 return -EINVAL;
850         }
851         if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
852                 goto error;
853         if (remote-- && mode == LOCAL_AND_REMOTE)
854                 goto apply;
855         return 0;
856
857 error:
858         TAP_LOG(DEBUG, "%s(%s) failed: %s(%d)", ifr->ifr_name,
859                 tap_ioctl_req2str(request), strerror(errno), errno);
860         return -errno;
861 }
862
863 static int
864 tap_link_set_down(struct rte_eth_dev *dev)
865 {
866         struct pmd_internals *pmd = dev->data->dev_private;
867         struct ifreq ifr = { .ifr_flags = IFF_UP };
868
869         dev->data->dev_link.link_status = ETH_LINK_DOWN;
870         return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
871 }
872
873 static int
874 tap_link_set_up(struct rte_eth_dev *dev)
875 {
876         struct pmd_internals *pmd = dev->data->dev_private;
877         struct ifreq ifr = { .ifr_flags = IFF_UP };
878
879         dev->data->dev_link.link_status = ETH_LINK_UP;
880         return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
881 }
882
883 static int
884 tap_dev_start(struct rte_eth_dev *dev)
885 {
886         int err, i;
887
888         err = tap_intr_handle_set(dev, 1);
889         if (err)
890                 return err;
891
892         err = tap_link_set_up(dev);
893         if (err)
894                 return err;
895
896         for (i = 0; i < dev->data->nb_tx_queues; i++)
897                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
898         for (i = 0; i < dev->data->nb_rx_queues; i++)
899                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
900
901         return err;
902 }
903
904 /* This function gets called when the current port gets stopped.
905  */
906 static int
907 tap_dev_stop(struct rte_eth_dev *dev)
908 {
909         int i;
910
911         for (i = 0; i < dev->data->nb_tx_queues; i++)
912                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
913         for (i = 0; i < dev->data->nb_rx_queues; i++)
914                 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
915
916         tap_intr_handle_set(dev, 0);
917         tap_link_set_down(dev);
918
919         return 0;
920 }
921
922 static int
923 tap_dev_configure(struct rte_eth_dev *dev)
924 {
925         struct pmd_internals *pmd = dev->data->dev_private;
926
927         if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
928                 TAP_LOG(ERR,
929                         "%s: number of rx queues %d exceeds max num of queues %d",
930                         dev->device->name,
931                         dev->data->nb_rx_queues,
932                         RTE_PMD_TAP_MAX_QUEUES);
933                 return -1;
934         }
935         if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
936                 TAP_LOG(ERR,
937                         "%s: number of tx queues %d exceeds max num of queues %d",
938                         dev->device->name,
939                         dev->data->nb_tx_queues,
940                         RTE_PMD_TAP_MAX_QUEUES);
941                 return -1;
942         }
943
944         TAP_LOG(INFO, "%s: %s: TX configured queues number: %u",
945                 dev->device->name, pmd->name, dev->data->nb_tx_queues);
946
947         TAP_LOG(INFO, "%s: %s: RX configured queues number: %u",
948                 dev->device->name, pmd->name, dev->data->nb_rx_queues);
949
950         return 0;
951 }
952
953 static uint32_t
954 tap_dev_speed_capa(void)
955 {
956         uint32_t speed = pmd_link.link_speed;
957         uint32_t capa = 0;
958
959         if (speed >= ETH_SPEED_NUM_10M)
960                 capa |= ETH_LINK_SPEED_10M;
961         if (speed >= ETH_SPEED_NUM_100M)
962                 capa |= ETH_LINK_SPEED_100M;
963         if (speed >= ETH_SPEED_NUM_1G)
964                 capa |= ETH_LINK_SPEED_1G;
965         if (speed >= ETH_SPEED_NUM_5G)
966                 capa |= ETH_LINK_SPEED_2_5G;
967         if (speed >= ETH_SPEED_NUM_5G)
968                 capa |= ETH_LINK_SPEED_5G;
969         if (speed >= ETH_SPEED_NUM_10G)
970                 capa |= ETH_LINK_SPEED_10G;
971         if (speed >= ETH_SPEED_NUM_20G)
972                 capa |= ETH_LINK_SPEED_20G;
973         if (speed >= ETH_SPEED_NUM_25G)
974                 capa |= ETH_LINK_SPEED_25G;
975         if (speed >= ETH_SPEED_NUM_40G)
976                 capa |= ETH_LINK_SPEED_40G;
977         if (speed >= ETH_SPEED_NUM_50G)
978                 capa |= ETH_LINK_SPEED_50G;
979         if (speed >= ETH_SPEED_NUM_56G)
980                 capa |= ETH_LINK_SPEED_56G;
981         if (speed >= ETH_SPEED_NUM_100G)
982                 capa |= ETH_LINK_SPEED_100G;
983
984         return capa;
985 }
986
987 static int
988 tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
989 {
990         struct pmd_internals *internals = dev->data->dev_private;
991
992         dev_info->if_index = internals->if_index;
993         dev_info->max_mac_addrs = 1;
994         dev_info->max_rx_pktlen = (uint32_t)RTE_ETHER_MAX_VLAN_FRAME_LEN;
995         dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
996         dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
997         dev_info->min_rx_bufsize = 0;
998         dev_info->speed_capa = tap_dev_speed_capa();
999         dev_info->rx_queue_offload_capa = TAP_RX_OFFLOAD;
1000         dev_info->rx_offload_capa = dev_info->rx_queue_offload_capa;
1001         dev_info->tx_queue_offload_capa = TAP_TX_OFFLOAD;
1002         dev_info->tx_offload_capa = dev_info->tx_queue_offload_capa;
1003         dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
1004         /*
1005          * limitation: TAP supports all of IP, UDP and TCP hash
1006          * functions together and not in partial combinations
1007          */
1008         dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
1009
1010         return 0;
1011 }
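/*
 * A minimal sketch of how an application could check the capabilities
 * advertised above before enabling an offload; port_id and conf are
 * illustrative assumptions.
 */
#if 0
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) == 0 &&
	    (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) != 0)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
#endif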
1012
1013 static int
1014 tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
1015 {
1016         unsigned int i, imax;
1017         unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
1018         unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
1019         unsigned long rx_nombuf = 0, ierrors = 0;
1020         const struct pmd_internals *pmd = dev->data->dev_private;
1021
1022         /* rx queue statistics */
1023         imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1024                 dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1025         for (i = 0; i < imax; i++) {
1026                 tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
1027                 tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
1028                 rx_total += tap_stats->q_ipackets[i];
1029                 rx_bytes_total += tap_stats->q_ibytes[i];
1030                 rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
1031                 ierrors += pmd->rxq[i].stats.ierrors;
1032         }
1033
1034         /* tx queue statistics */
1035         imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1036                 dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1037
1038         for (i = 0; i < imax; i++) {
1039                 tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
1040                 tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
1041                 tx_total += tap_stats->q_opackets[i];
1042                 tx_err_total += pmd->txq[i].stats.errs;
1043                 tx_bytes_total += tap_stats->q_obytes[i];
1044         }
1045
1046         tap_stats->ipackets = rx_total;
1047         tap_stats->ibytes = rx_bytes_total;
1048         tap_stats->ierrors = ierrors;
1049         tap_stats->rx_nombuf = rx_nombuf;
1050         tap_stats->opackets = tx_total;
1051         tap_stats->oerrors = tx_err_total;
1052         tap_stats->obytes = tx_bytes_total;
1053         return 0;
1054 }
1055
1056 static int
1057 tap_stats_reset(struct rte_eth_dev *dev)
1058 {
1059         int i;
1060         struct pmd_internals *pmd = dev->data->dev_private;
1061
1062         for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
1063                 pmd->rxq[i].stats.ipackets = 0;
1064                 pmd->rxq[i].stats.ibytes = 0;
1065                 pmd->rxq[i].stats.ierrors = 0;
1066                 pmd->rxq[i].stats.rx_nombuf = 0;
1067
1068                 pmd->txq[i].stats.opackets = 0;
1069                 pmd->txq[i].stats.errs = 0;
1070                 pmd->txq[i].stats.obytes = 0;
1071         }
1072
1073         return 0;
1074 }
1075
1076 static int
1077 tap_dev_close(struct rte_eth_dev *dev)
1078 {
1079         int i;
1080         struct pmd_internals *internals = dev->data->dev_private;
1081         struct pmd_process_private *process_private = dev->process_private;
1082         struct rx_queue *rxq;
1083
1084         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1085                 rte_free(dev->process_private);
1086                 return 0;
1087         }
1088
1089         tap_link_set_down(dev);
1090         if (internals->nlsk_fd != -1) {
1091                 tap_flow_flush(dev, NULL);
1092                 tap_flow_implicit_flush(internals, NULL);
1093                 tap_nl_final(internals->nlsk_fd);
1094                 internals->nlsk_fd = -1;
1095         }
1096
1097         for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
1098                 if (process_private->rxq_fds[i] != -1) {
1099                         rxq = &internals->rxq[i];
1100                         close(process_private->rxq_fds[i]);
1101                         process_private->rxq_fds[i] = -1;
1102                         tap_rxq_pool_free(rxq->pool);
1103                         rte_free(rxq->iovecs);
1104                         rxq->pool = NULL;
1105                         rxq->iovecs = NULL;
1106                 }
1107                 if (process_private->txq_fds[i] != -1) {
1108                         close(process_private->txq_fds[i]);
1109                         process_private->txq_fds[i] = -1;
1110                 }
1111         }
1112
1113         if (internals->remote_if_index) {
1114                 /* Restore initial remote state */
1115                 int ret = ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
1116                                 &internals->remote_initial_flags);
1117                 if (ret)
1118                         TAP_LOG(ERR, "restore remote state failed: %d", ret);
1119
1120         }
1121
1122         rte_mempool_free(internals->gso_ctx_mp);
1123         internals->gso_ctx_mp = NULL;
1124
1125         if (internals->ka_fd != -1) {
1126                 close(internals->ka_fd);
1127                 internals->ka_fd = -1;
1128         }
1129
1130         /* mac_addrs must not be freed alone because it is part of dev_private */
1131         dev->data->mac_addrs = NULL;
1132
1133         internals = dev->data->dev_private;
1134         TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
1135                 tuntap_types[internals->type], rte_socket_id());
1136
1137         if (internals->ioctl_sock != -1) {
1138                 close(internals->ioctl_sock);
1139                 internals->ioctl_sock = -1;
1140         }
1141         rte_free(dev->process_private);
1142         if (tap_devices_count == 1)
1143                 rte_mp_action_unregister(TAP_MP_KEY);
1144         tap_devices_count--;
1145         /*
1146          * Since the TUN device has no more open file descriptors,
1147          * it will be removed from the kernel.
1148          */
1149
1150         return 0;
1151 }
1152
1153 static void
1154 tap_rx_queue_release(void *queue)
1155 {
1156         struct rx_queue *rxq = queue;
1157         struct pmd_process_private *process_private;
1158
1159         if (!rxq)
1160                 return;
1161         process_private = rte_eth_devices[rxq->in_port].process_private;
1162         if (process_private->rxq_fds[rxq->queue_id] != -1) {
1163                 close(process_private->rxq_fds[rxq->queue_id]);
1164                 process_private->rxq_fds[rxq->queue_id] = -1;
1165                 tap_rxq_pool_free(rxq->pool);
1166                 rte_free(rxq->iovecs);
1167                 rxq->pool = NULL;
1168                 rxq->iovecs = NULL;
1169         }
1170 }
1171
1172 static void
1173 tap_tx_queue_release(void *queue)
1174 {
1175         struct tx_queue *txq = queue;
1176         struct pmd_process_private *process_private;
1177
1178         if (!txq)
1179                 return;
1180         process_private = rte_eth_devices[txq->out_port].process_private;
1181
1182         if (process_private->txq_fds[txq->queue_id] != -1) {
1183                 close(process_private->txq_fds[txq->queue_id]);
1184                 process_private->txq_fds[txq->queue_id] = -1;
1185         }
1186 }
1187
1188 static int
1189 tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
1190 {
1191         struct rte_eth_link *dev_link = &dev->data->dev_link;
1192         struct pmd_internals *pmd = dev->data->dev_private;
1193         struct ifreq ifr = { .ifr_flags = 0 };
1194
1195         if (pmd->remote_if_index) {
1196                 tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
1197                 if (!(ifr.ifr_flags & IFF_UP) ||
1198                     !(ifr.ifr_flags & IFF_RUNNING)) {
1199                         dev_link->link_status = ETH_LINK_DOWN;
1200                         return 0;
1201                 }
1202         }
1203         tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
1204         dev_link->link_status =
1205                 ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
1206                  ETH_LINK_UP :
1207                  ETH_LINK_DOWN);
1208         return 0;
1209 }
1210
1211 static int
1212 tap_promisc_enable(struct rte_eth_dev *dev)
1213 {
1214         struct pmd_internals *pmd = dev->data->dev_private;
1215         struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
1216         int ret;
1217
1218         ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1219         if (ret != 0)
1220                 return ret;
1221
1222         if (pmd->remote_if_index && !pmd->flow_isolate) {
1223                 dev->data->promiscuous = 1;
1224                 ret = tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
1225                 if (ret != 0) {
1226                         /* Rollback promisc flag */
1227                         tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1228                         /*
1229                          * rte_eth_dev_promiscuous_enable() rolls back
1230                          * dev->data->promiscuous in case of failure.
1231                          */
1232                         return ret;
1233                 }
1234         }
1235
1236         return 0;
1237 }
1238
1239 static int
1240 tap_promisc_disable(struct rte_eth_dev *dev)
1241 {
1242         struct pmd_internals *pmd = dev->data->dev_private;
1243         struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
1244         int ret;
1245
1246         ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1247         if (ret != 0)
1248                 return ret;
1249
1250         if (pmd->remote_if_index && !pmd->flow_isolate) {
1251                 dev->data->promiscuous = 0;
1252                 ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
1253                 if (ret != 0) {
1254                         /* Rollback promisc flag */
1255                         tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1256                         /*
1257                          * rte_eth_dev_promiscuous_disable() rolls back
1258                          * dev->data->promiscuous in case of failure.
1259                          */
1260                         return ret;
1261                 }
1262         }
1263
1264         return 0;
1265 }
1266
1267 static int
1268 tap_allmulti_enable(struct rte_eth_dev *dev)
1269 {
1270         struct pmd_internals *pmd = dev->data->dev_private;
1271         struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
1272         int ret;
1273
1274         ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1275         if (ret != 0)
1276                 return ret;
1277
1278         if (pmd->remote_if_index && !pmd->flow_isolate) {
1279                 dev->data->all_multicast = 1;
1280                 ret = tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
1281                 if (ret != 0) {
1282                         /* Rollback allmulti flag */
1283                         tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1284                         /*
1285                          * rte_eth_dev_allmulticast_enable() rolls back
1286                          * dev->data->all_multicast in case of failure.
1287                          */
1288                         return ret;
1289                 }
1290         }
1291
1292         return 0;
1293 }
1294
1295 static int
1296 tap_allmulti_disable(struct rte_eth_dev *dev)
1297 {
1298         struct pmd_internals *pmd = dev->data->dev_private;
1299         struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
1300         int ret;
1301
1302         ret = tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
1303         if (ret != 0)
1304                 return ret;
1305
1306         if (pmd->remote_if_index && !pmd->flow_isolate) {
1307                 dev->data->all_multicast = 0;
1308                 ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
1309                 if (ret != 0) {
1310                         /* Rollback allmulti flag */
1311                         tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
1312                         /*
1313                          * rte_eth_dev_allmulticast_disable() rolls back
1314                          * dev->data->all_multicast in case of failure.
1315                          */
1316                         return ret;
1317                 }
1318         }
1319
1320         return 0;
1321 }
1322
1323 static int
1324 tap_mac_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1325 {
1326         struct pmd_internals *pmd = dev->data->dev_private;
1327         enum ioctl_mode mode = LOCAL_ONLY;
1328         struct ifreq ifr;
1329         int ret;
1330
1331         if (pmd->type == ETH_TUNTAP_TYPE_TUN) {
1332                 TAP_LOG(ERR, "%s: can't set MAC address for TUN",
1333                         dev->device->name);
1334                 return -ENOTSUP;
1335         }
1336
1337         if (rte_is_zero_ether_addr(mac_addr)) {
1338                 TAP_LOG(ERR, "%s: can't set an empty MAC address",
1339                         dev->device->name);
1340                 return -EINVAL;
1341         }
1342         /* Check the actual current MAC address on the tap netdevice */
1343         ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
1344         if (ret < 0)
1345                 return ret;
1346         if (rte_is_same_ether_addr(
1347                         (struct rte_ether_addr *)&ifr.ifr_hwaddr.sa_data,
1348                         mac_addr))
1349                 return 0;
1350         /* Check the current MAC address on the remote */
1351         ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
1352         if (ret < 0)
1353                 return ret;
1354         if (!rte_is_same_ether_addr(
1355                         (struct rte_ether_addr *)&ifr.ifr_hwaddr.sa_data,
1356                         mac_addr))
1357                 mode = LOCAL_AND_REMOTE;
1358         ifr.ifr_hwaddr.sa_family = AF_LOCAL;
1359         rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, RTE_ETHER_ADDR_LEN);
1360         ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
1361         if (ret < 0)
1362                 return ret;
1363         rte_memcpy(&pmd->eth_addr, mac_addr, RTE_ETHER_ADDR_LEN);
1364         if (pmd->remote_if_index && !pmd->flow_isolate) {
1365                 /* Replace MAC redirection rule after a MAC change */
1366                 ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
1367                 if (ret < 0) {
1368                         TAP_LOG(ERR,
1369                                 "%s: Couldn't delete MAC redirection rule",
1370                                 dev->device->name);
1371                         return ret;
1372                 }
1373                 ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
1374                 if (ret < 0) {
1375                         TAP_LOG(ERR,
1376                                 "%s: Couldn't add MAC redirection rule",
1377                                 dev->device->name);
1378                         return ret;
1379                 }
1380         }
1381
1382         return 0;
1383 }
1384
1385 static int
1386 tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
1387 {
1388         uint32_t gso_types;
1389         char pool_name[64];
1390         struct pmd_internals *pmd = dev->data->dev_private;
1391         int ret;
1392
1393         /* initialize GSO context */
1394         gso_types = DEV_TX_OFFLOAD_TCP_TSO;
1395         if (!pmd->gso_ctx_mp) {
1396                 /*
1397                  * Create a private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
1398                  * bytes per mbuf; use this pool for both direct and
1399                  * indirect mbufs
1400                  */
1401                 ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
1402                                 dev->device->name);
1403                 if (ret < 0 || ret >= (int)sizeof(pool_name)) {
1404                         TAP_LOG(ERR,
1405                                 "%s: failed to create mbuf pool name for device %s, "
1406                                 "device name too long or output error, ret: %d\n",
1407                                 pmd->name, dev->device->name, ret);
1408                         return -ENAMETOOLONG;
1409                 }
1410                 pmd->gso_ctx_mp = rte_pktmbuf_pool_create(pool_name,
1411                         TAP_GSO_MBUFS_NUM, TAP_GSO_MBUF_CACHE_SIZE, 0,
1412                         RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
1413                         SOCKET_ID_ANY);
1414                 if (!pmd->gso_ctx_mp) {
1415                         TAP_LOG(ERR,
1416                                 "%s: failed to create mbuf pool for device %s",
1417                                 pmd->name, dev->device->name);
1418                         return -1;
1419                 }
1420         }
1421
1422         gso_ctx->direct_pool = pmd->gso_ctx_mp;
1423         gso_ctx->indirect_pool = pmd->gso_ctx_mp;
1424         gso_ctx->gso_types = gso_types;
1425         gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
1426         gso_ctx->flag = 0;
1427
1428         return 0;
1429 }
1430
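/*
 * Provide a file descriptor for a queue: reuse the one already assigned,
 * dup() the fd of the peer RX/TX queue with the same index, or open a new
 * tun/tap fd. The GSO context is only initialized for TX queues that did
 * not already have a file descriptor.
 */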
1431 static int
1432 tap_setup_queue(struct rte_eth_dev *dev,
1433                 struct pmd_internals *internals,
1434                 uint16_t qid,
1435                 int is_rx)
1436 {
1437         int ret;
1438         int *fd;
1439         int *other_fd;
1440         const char *dir;
1441         struct pmd_internals *pmd = dev->data->dev_private;
1442         struct pmd_process_private *process_private = dev->process_private;
1443         struct rx_queue *rx = &internals->rxq[qid];
1444         struct tx_queue *tx = &internals->txq[qid];
1445         struct rte_gso_ctx *gso_ctx;
1446
1447         if (is_rx) {
1448                 fd = &process_private->rxq_fds[qid];
1449                 other_fd = &process_private->txq_fds[qid];
1450                 dir = "rx";
1451                 gso_ctx = NULL;
1452         } else {
1453                 fd = &process_private->txq_fds[qid];
1454                 other_fd = &process_private->rxq_fds[qid];
1455                 dir = "tx";
1456                 gso_ctx = &tx->gso_ctx;
1457         }
1458         if (*fd != -1) {
1459                 /* fd for this queue already exists */
1460                 TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
1461                         pmd->name, *fd, dir, qid);
1462                 gso_ctx = NULL;
1463         } else if (*other_fd != -1) {
1464                 /* Only other_fd exists. dup it */
1465                 *fd = dup(*other_fd);
1466                 if (*fd < 0) {
1467                         *fd = -1;
1468                         TAP_LOG(ERR, "%s: dup() failed.", pmd->name);
1469                         return -1;
1470                 }
1471                 TAP_LOG(DEBUG, "%s: dup fd %d for %s queue qid %d (%d)",
1472                         pmd->name, *other_fd, dir, qid, *fd);
1473         } else {
1474                 /* Neither the RX nor the TX fd exists yet (both are -1): create one */
1475                 *fd = tun_alloc(pmd, 0);
1476                 if (*fd < 0) {
1477                         *fd = -1; /* restore original value */
1478                         TAP_LOG(ERR, "%s: tun_alloc() failed.", pmd->name);
1479                         return -1;
1480                 }
1481                 TAP_LOG(DEBUG, "%s: add %s queue for qid %d fd %d",
1482                         pmd->name, dir, qid, *fd);
1483         }
1484
1485         tx->mtu = &dev->data->mtu;
1486         rx->rxmode = &dev->data->dev_conf.rxmode;
1487         if (gso_ctx) {
1488                 ret = tap_gso_ctx_setup(gso_ctx, dev);
1489                 if (ret)
1490                         return -1;
1491         }
1492
1493         tx->type = pmd->type;
1494
1495         return *fd;
1496 }
1497
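/*
 * DPDK callback to configure a RX queue: allocate the iovec array used by
 * readv(), pre-fill it with one mbuf per descriptor from the given mempool
 * and get a file descriptor for the queue.
 */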
1498 static int
1499 tap_rx_queue_setup(struct rte_eth_dev *dev,
1500                    uint16_t rx_queue_id,
1501                    uint16_t nb_rx_desc,
1502                    unsigned int socket_id,
1503                    const struct rte_eth_rxconf *rx_conf __rte_unused,
1504                    struct rte_mempool *mp)
1505 {
1506         struct pmd_internals *internals = dev->data->dev_private;
1507         struct pmd_process_private *process_private = dev->process_private;
1508         struct rx_queue *rxq = &internals->rxq[rx_queue_id];
1509         struct rte_mbuf **tmp = &rxq->pool;
1510         long iov_max = sysconf(_SC_IOV_MAX);
1511
1512         if (iov_max <= 0) {
1513                 TAP_LOG(WARNING,
1514                         "_SC_IOV_MAX is not defined. Using %d as default",
1515                         TAP_IOV_DEFAULT_MAX);
1516                 iov_max = TAP_IOV_DEFAULT_MAX;
1517         }
1518         uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
1519         struct iovec (*iovecs)[nb_desc + 1];
1520         int data_off = RTE_PKTMBUF_HEADROOM;
1521         int ret = 0;
1522         int fd;
1523         int i;
1524
1525         if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
1526                 TAP_LOG(WARNING,
1527                         "RX queue id %d out of range (nb_rx_queues %d) or mempool is NULL",
1528                         rx_queue_id, dev->data->nb_rx_queues);
1529                 return -1;
1530         }
1531
1532         rxq->mp = mp;
1533         rxq->trigger_seen = 1; /* force initial burst */
1534         rxq->in_port = dev->data->port_id;
1535         rxq->queue_id = rx_queue_id;
1536         rxq->nb_rx_desc = nb_desc;
1537         iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
1538                                     socket_id);
1539         if (!iovecs) {
1540                 TAP_LOG(WARNING,
1541                         "%s: Couldn't allocate %d RX descriptors",
1542                         dev->device->name, nb_desc);
1543                 return -ENOMEM;
1544         }
1545         rxq->iovecs = iovecs;
1546
1547         dev->data->rx_queues[rx_queue_id] = rxq;
1548         fd = tap_setup_queue(dev, internals, rx_queue_id, 1);
1549         if (fd == -1) {
1550                 ret = fd;
1551                 goto error;
1552         }
1553
1554         (*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
1555         (*rxq->iovecs)[0].iov_base = &rxq->pi;
1556
1557         for (i = 1; i <= nb_desc; i++) {
1558                 *tmp = rte_pktmbuf_alloc(rxq->mp);
1559                 if (!*tmp) {
1560                         TAP_LOG(WARNING,
1561                                 "%s: couldn't allocate memory for queue %d",
1562                                 dev->device->name, rx_queue_id);
1563                         ret = -ENOMEM;
1564                         goto error;
1565                 }
1566                 (*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
1567                 (*rxq->iovecs)[i].iov_base =
1568                         (char *)(*tmp)->buf_addr + data_off;
1569                 data_off = 0;
1570                 tmp = &(*tmp)->next;
1571         }
1572
1573         TAP_LOG(DEBUG, "  RX TUNTAP device name %s, qid %d on fd %d",
1574                 internals->name, rx_queue_id,
1575                 process_private->rxq_fds[rx_queue_id]);
1576
1577         return 0;
1578
1579 error:
1580         tap_rxq_pool_free(rxq->pool);
1581         rxq->pool = NULL;
1582         rte_free(rxq->iovecs);
1583         rxq->iovecs = NULL;
1584         return ret;
1585 }
1586
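/*
 * DPDK callback to configure a TX queue: record whether checksum offloads
 * are requested and get a file descriptor for the queue.
 */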
1587 static int
1588 tap_tx_queue_setup(struct rte_eth_dev *dev,
1589                    uint16_t tx_queue_id,
1590                    uint16_t nb_tx_desc __rte_unused,
1591                    unsigned int socket_id __rte_unused,
1592                    const struct rte_eth_txconf *tx_conf)
1593 {
1594         struct pmd_internals *internals = dev->data->dev_private;
1595         struct pmd_process_private *process_private = dev->process_private;
1596         struct tx_queue *txq;
1597         int ret;
1598         uint64_t offloads;
1599
1600         if (tx_queue_id >= dev->data->nb_tx_queues)
1601                 return -1;
1602         dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
1603         txq = dev->data->tx_queues[tx_queue_id];
1604         txq->out_port = dev->data->port_id;
1605         txq->queue_id = tx_queue_id;
1606
1607         offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1608         txq->csum = !!(offloads &
1609                         (DEV_TX_OFFLOAD_IPV4_CKSUM |
1610                          DEV_TX_OFFLOAD_UDP_CKSUM |
1611                          DEV_TX_OFFLOAD_TCP_CKSUM));
1612
1613         ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
1614         if (ret == -1)
1615                 return -1;
1616         TAP_LOG(DEBUG,
1617                 "  TX TUNTAP device name %s, qid %d on fd %d csum %s",
1618                 internals->name, tx_queue_id,
1619                 process_private->txq_fds[tx_queue_id],
1620                 txq->csum ? "on" : "off");
1621
1622         return 0;
1623 }
1624
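/* DPDK callback to set the MTU on the tap netdevice (and the remote, if any). */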
1625 static int
1626 tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1627 {
1628         struct pmd_internals *pmd = dev->data->dev_private;
1629         struct ifreq ifr = { .ifr_mtu = mtu };
1630         int err = 0;
1631
1632         err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
1633         if (!err)
1634                 dev->data->mtu = mtu;
1635
1636         return err;
1637 }
1638
1639 static int
1640 tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
1641                      struct rte_ether_addr *mc_addr_set __rte_unused,
1642                      uint32_t nb_mc_addr __rte_unused)
1643 {
1644         /*
1645          * Nothing to do here: the tap interface has no MAC filtering at all,
1646          * so every packet is received anyway.
1647          */
1648         return 0;
1649 }
1650
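/*
 * Netlink message handler: refresh the link status on RTM_NEWLINK events
 * concerning the tap or remote interface.
 */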
1651 static int
1652 tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
1653 {
1654         struct rte_eth_dev *dev = arg;
1655         struct pmd_internals *pmd = dev->data->dev_private;
1656         struct ifinfomsg *info = NLMSG_DATA(nh);
1657
1658         if (nh->nlmsg_type != RTM_NEWLINK ||
1659             (info->ifi_index != pmd->if_index &&
1660              info->ifi_index != pmd->remote_if_index))
1661                 return 0;
1662         return tap_link_update(dev, 0);
1663 }
1664
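/* LSC interrupt handler: drain pending netlink messages on the interrupt fd. */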
1665 static void
1666 tap_dev_intr_handler(void *cb_arg)
1667 {
1668         struct rte_eth_dev *dev = cb_arg;
1669         struct pmd_internals *pmd = dev->data->dev_private;
1670
1671         tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
1672 }
1673
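/*
 * Register or unregister the netlink-based link state change (LSC) interrupt
 * callback, depending on 'set' and on the current device configuration.
 */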
1674 static int
1675 tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
1676 {
1677         struct pmd_internals *pmd = dev->data->dev_private;
1678         int ret;
1679
1680         /* In any case, disable the interrupt if LSC is no longer configured. */
1681         if (!dev->data->dev_conf.intr_conf.lsc) {
1682                 if (pmd->intr_handle.fd != -1)
1683                         goto clean;
1685                 return 0;
1686         }
1687         if (set) {
1688                 pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK);
1689                 if (unlikely(pmd->intr_handle.fd == -1))
1690                         return -EBADF;
1691                 return rte_intr_callback_register(
1692                         &pmd->intr_handle, tap_dev_intr_handler, dev);
1693         }
1694
1695 clean:
1696         do {
1697                 ret = rte_intr_callback_unregister(&pmd->intr_handle,
1698                         tap_dev_intr_handler, dev);
1699                 if (ret >= 0) {
1700                         break;
1701                 } else if (ret == -EAGAIN) {
1702                         rte_delay_ms(100);
1703                 } else {
1704                         TAP_LOG(ERR, "intr callback unregister failed: %d",
1705                                      ret);
1706                         break;
1707                 }
1708         } while (true);
1709
1710         tap_nl_final(pmd->intr_handle.fd);
1711         pmd->intr_handle.fd = -1;
1712
1713         return 0;
1714 }
1715
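/*
 * Set up both LSC and RX interrupt handling; roll back the LSC handler if
 * enabling the RX interrupt vector fails.
 */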
1716 static int
1717 tap_intr_handle_set(struct rte_eth_dev *dev, int set)
1718 {
1719         int err;
1720
1721         err = tap_lsc_intr_handle_set(dev, set);
1722         if (err < 0) {
1723                 if (!set)
1724                         tap_rx_intr_vec_set(dev, 0);
1725                 return err;
1726         }
1727         err = tap_rx_intr_vec_set(dev, set);
1728         if (err && set)
1729                 tap_lsc_intr_handle_set(dev, 0);
1730         return err;
1731 }
1732
1733 static const uint32_t *
1734 tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1735 {
1736         static const uint32_t ptypes[] = {
1737                 RTE_PTYPE_INNER_L2_ETHER,
1738                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1739                 RTE_PTYPE_INNER_L2_ETHER_QINQ,
1740                 RTE_PTYPE_INNER_L3_IPV4,
1741                 RTE_PTYPE_INNER_L3_IPV4_EXT,
1742                 RTE_PTYPE_INNER_L3_IPV6,
1743                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1744                 RTE_PTYPE_INNER_L4_FRAG,
1745                 RTE_PTYPE_INNER_L4_UDP,
1746                 RTE_PTYPE_INNER_L4_TCP,
1747                 RTE_PTYPE_INNER_L4_SCTP,
1748                 RTE_PTYPE_L2_ETHER,
1749                 RTE_PTYPE_L2_ETHER_VLAN,
1750                 RTE_PTYPE_L2_ETHER_QINQ,
1751                 RTE_PTYPE_L3_IPV4,
1752                 RTE_PTYPE_L3_IPV4_EXT,
1753                 RTE_PTYPE_L3_IPV6_EXT,
1754                 RTE_PTYPE_L3_IPV6,
1755                 RTE_PTYPE_L4_FRAG,
1756                 RTE_PTYPE_L4_UDP,
1757                 RTE_PTYPE_L4_TCP,
1758                 RTE_PTYPE_L4_SCTP,
1759         };
1760
1761         return ptypes;
1762 }
1763
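/* Flow control is not supported: only RTE_FC_NONE can be reported or set. */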
1764 static int
1765 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
1766                   struct rte_eth_fc_conf *fc_conf)
1767 {
1768         fc_conf->mode = RTE_FC_NONE;
1769         return 0;
1770 }
1771
1772 static int
1773 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
1774                   struct rte_eth_fc_conf *fc_conf)
1775 {
1776         if (fc_conf->mode != RTE_FC_NONE)
1777                 return -ENOTSUP;
1778         return 0;
1779 }
1780
1781 /**
1782  * DPDK callback to update the RSS hash configuration.
1783  *
1784  * @param dev
1785  *   Pointer to Ethernet device structure.
1786  * @param[in] rss_conf
1787  *   RSS configuration data.
1788  *
1789  * @return
1790  *   0 on success, a negative errno value otherwise and rte_errno is set.
1791  */
1792 static int
1793 tap_rss_hash_update(struct rte_eth_dev *dev,
1794                 struct rte_eth_rss_conf *rss_conf)
1795 {
1796         if (rss_conf->rss_hf & TAP_RSS_HF_MASK) {
1797                 rte_errno = EINVAL;
1798                 return -rte_errno;
1799         }
1800         if (rss_conf->rss_key && rss_conf->rss_key_len) {
1801                 /*
1802                  * Currently the TAP RSS key is hard-coded
1803                  * and cannot be updated.
1804                  */
1805                 TAP_LOG(ERR,
1806                         "port %u RSS key cannot be updated",
1807                         dev->data->port_id);
1808                 rte_errno = EINVAL;
1809                 return -rte_errno;
1810         }
1811         return 0;
1812 }
1813
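/* Queue start/stop callbacks only update the queue state flags. */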
1814 static int
1815 tap_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1816 {
1817         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1818
1819         return 0;
1820 }
1821
1822 static int
1823 tap_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1824 {
1825         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1826
1827         return 0;
1828 }
1829
1830 static int
1831 tap_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1832 {
1833         dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1834
1835         return 0;
1836 }
1837
1838 static int
1839 tap_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1840 {
1841         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1842
1843         return 0;
1844 }

1845 static const struct eth_dev_ops ops = {
1846         .dev_start              = tap_dev_start,
1847         .dev_stop               = tap_dev_stop,
1848         .dev_close              = tap_dev_close,
1849         .dev_configure          = tap_dev_configure,
1850         .dev_infos_get          = tap_dev_info,
1851         .rx_queue_setup         = tap_rx_queue_setup,
1852         .tx_queue_setup         = tap_tx_queue_setup,
1853         .rx_queue_start         = tap_rx_queue_start,
1854         .tx_queue_start         = tap_tx_queue_start,
1855         .rx_queue_stop          = tap_rx_queue_stop,
1856         .tx_queue_stop          = tap_tx_queue_stop,
1857         .rx_queue_release       = tap_rx_queue_release,
1858         .tx_queue_release       = tap_tx_queue_release,
1859         .flow_ctrl_get          = tap_flow_ctrl_get,
1860         .flow_ctrl_set          = tap_flow_ctrl_set,
1861         .link_update            = tap_link_update,
1862         .dev_set_link_up        = tap_link_set_up,
1863         .dev_set_link_down      = tap_link_set_down,
1864         .promiscuous_enable     = tap_promisc_enable,
1865         .promiscuous_disable    = tap_promisc_disable,
1866         .allmulticast_enable    = tap_allmulti_enable,
1867         .allmulticast_disable   = tap_allmulti_disable,
1868         .mac_addr_set           = tap_mac_set,
1869         .mtu_set                = tap_mtu_set,
1870         .set_mc_addr_list       = tap_set_mc_addr_list,
1871         .stats_get              = tap_stats_get,
1872         .stats_reset            = tap_stats_reset,
1873         .dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
1874         .rss_hash_update        = tap_rss_hash_update,
1875         .flow_ops_get           = tap_dev_flow_ops_get,
1876 };
1877
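/*
 * Create and initialize a TUN/TAP Ethernet device: allocate the ethdev and
 * its per-process data, open the keep-alive fd, apply MAC and MTU, and set
 * up netlink, the QDISCs and the implicit flow rules needed for remote mode.
 */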
1878 static int
1879 eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
1880                    char *remote_iface, struct rte_ether_addr *mac_addr,
1881                    enum rte_tuntap_type type)
1882 {
1883         int numa_node = rte_socket_id();
1884         struct rte_eth_dev *dev;
1885         struct pmd_internals *pmd;
1886         struct pmd_process_private *process_private;
1887         const char *tuntap_name = tuntap_types[type];
1888         struct rte_eth_dev_data *data;
1889         struct ifreq ifr;
1890         int i;
1891
1892         TAP_LOG(DEBUG, "%s device on numa %u", tuntap_name, rte_socket_id());
1893
1894         dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
1895         if (!dev) {
1896                 TAP_LOG(ERR, "%s Unable to allocate device struct",
1897                                 tuntap_name);
1898                 goto error_exit_nodev;
1899         }
1900
1901         process_private = (struct pmd_process_private *)
1902                 rte_zmalloc_socket(tap_name, sizeof(struct pmd_process_private),
1903                         RTE_CACHE_LINE_SIZE, dev->device->numa_node);
1904
1905         if (process_private == NULL) {
1906                 TAP_LOG(ERR, "Failed to alloc memory for process private");
1907                 return -1;
1908         }
1909         pmd = dev->data->dev_private;
1910         dev->process_private = process_private;
1911         pmd->dev = dev;
1912         strlcpy(pmd->name, tap_name, sizeof(pmd->name));
1913         pmd->type = type;
1914         pmd->ka_fd = -1;
1915         pmd->nlsk_fd = -1;
1916         pmd->gso_ctx_mp = NULL;
1917
1918         pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
1919         if (pmd->ioctl_sock == -1) {
1920                 TAP_LOG(ERR,
1921                         "%s Unable to get a socket for management: %s",
1922                         tuntap_name, strerror(errno));
1923                 goto error_exit;
1924         }
1925
1926         /* Setup some default values */
1927         data = dev->data;
1928         data->dev_private = pmd;
1929         data->dev_flags = RTE_ETH_DEV_INTR_LSC |
1930                                 RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1931         data->numa_node = numa_node;
1932
1933         data->dev_link = pmd_link;
1934         data->mac_addrs = &pmd->eth_addr;
1935         /* Set the number of RX and TX queues */
1936         data->nb_rx_queues = 0;
1937         data->nb_tx_queues = 0;
1938
1939         dev->dev_ops = &ops;
1940         dev->rx_pkt_burst = pmd_rx_burst;
1941         dev->tx_pkt_burst = pmd_tx_burst;
1942
1943         pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
1944         pmd->intr_handle.fd = -1;
1945         dev->intr_handle = &pmd->intr_handle;
1946
1947         /* Preset all queue fds to -1 to mark them as not valid */
1948         for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
1949                 process_private->rxq_fds[i] = -1;
1950                 process_private->txq_fds[i] = -1;
1951         }
1952
1953         if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
1954                 if (rte_is_zero_ether_addr(mac_addr))
1955                         rte_eth_random_addr((uint8_t *)&pmd->eth_addr);
1956                 else
1957                         rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr));
1958         }
1959
1960         /*
1961          * Allocate a TUN device keep-alive file descriptor that will only be
1962          * closed when the TUN device itself is closed or removed.
1963          * This keep-alive file descriptor will guarantee that the TUN device
1964                  * exists even when all of its queues are closed.
1965          */
1966         pmd->ka_fd = tun_alloc(pmd, 1);
1967         if (pmd->ka_fd == -1) {
1968                 TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
1969                 goto error_exit;
1970         }
1971         TAP_LOG(DEBUG, "allocated %s", pmd->name);
1972
1973         ifr.ifr_mtu = dev->data->mtu;
1974         if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
1975                 goto error_exit;
1976
1977         if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
1978                 memset(&ifr, 0, sizeof(struct ifreq));
1979                 ifr.ifr_hwaddr.sa_family = AF_LOCAL;
1980                 rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
1981                                 RTE_ETHER_ADDR_LEN);
1982                 if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
1983                         goto error_exit;
1984         }
1985
1986         /*
1987          * Set up everything related to rte_flow:
1988          * - netlink socket
1989          * - tap / remote if_index
1990          * - mandatory QDISCs
1991          * - rte_flow actual/implicit lists
1992          * - implicit rules
1993          */
1994         pmd->nlsk_fd = tap_nl_init(0);
1995         if (pmd->nlsk_fd == -1) {
1996                 TAP_LOG(WARNING, "%s: failed to create netlink socket.",
1997                         pmd->name);
1998                 goto disable_rte_flow;
1999         }
2000         pmd->if_index = if_nametoindex(pmd->name);
2001         if (!pmd->if_index) {
2002                 TAP_LOG(ERR, "%s: failed to get if_index.", pmd->name);
2003                 goto disable_rte_flow;
2004         }
2005         if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
2006                 TAP_LOG(ERR, "%s: failed to create multiq qdisc.",
2007                         pmd->name);
2008                 goto disable_rte_flow;
2009         }
2010         if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
2011                 TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
2012                         pmd->name);
2013                 goto disable_rte_flow;
2014         }
2015         LIST_INIT(&pmd->flows);
2016
2017         if (strlen(remote_iface)) {
2018                 pmd->remote_if_index = if_nametoindex(remote_iface);
2019                 if (!pmd->remote_if_index) {
2020                         TAP_LOG(ERR, "%s: failed to get %s if_index.",
2021                                 pmd->name, remote_iface);
2022                         goto error_remote;
2023                 }
2024                 strlcpy(pmd->remote_iface, remote_iface, RTE_ETH_NAME_MAX_LEN);
2025
2026                 /* Save state of remote device */
2027                 tap_ioctl(pmd, SIOCGIFFLAGS, &pmd->remote_initial_flags, 0, REMOTE_ONLY);
2028
2029                 /* Replicate remote MAC address */
2030                 if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
2031                         TAP_LOG(ERR, "%s: failed to get %s MAC address.",
2032                                 pmd->name, pmd->remote_iface);
2033                         goto error_remote;
2034                 }
2035                 rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
2036                            RTE_ETHER_ADDR_LEN);
2037                 /* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
2038                 if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
2039                         TAP_LOG(ERR, "%s: failed to set %s MAC address.",
2040                                 pmd->name, remote_iface);
2041                         goto error_remote;
2042                 }
2043
2044                 /*
2045                  * Flush usually returns negative value because it tries to
2046                  * delete every QDISC (and on a running device, one QDISC at
2047                  * least is needed). Ignore negative return value.
2048                  */
2049                 qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
2050                 if (qdisc_create_ingress(pmd->nlsk_fd,
2051                                          pmd->remote_if_index) < 0) {
2052                         TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
2053                                 pmd->remote_iface);
2054                         goto error_remote;
2055                 }
2056                 LIST_INIT(&pmd->implicit_flows);
2057                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
2058                     tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
2059                     tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
2060                     tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
2061                         TAP_LOG(ERR,
2062                                 "%s: failed to create implicit rules.",
2063                                 pmd->name);
2064                         goto error_remote;
2065                 }
2066         }
2067
2068         rte_eth_dev_probing_finish(dev);
2069         return 0;
2070
2071 disable_rte_flow:
2072         TAP_LOG(ERR, " Disabling rte flow support: %s(%d)",
2073                 strerror(errno), errno);
2074         if (strlen(remote_iface)) {
2075                 TAP_LOG(ERR, "Remote feature requires flow support.");
2076                 goto error_exit;
2077         }
2078         rte_eth_dev_probing_finish(dev);
2079         return 0;
2080
2081 error_remote:
2082         TAP_LOG(ERR, " Can't set up remote feature: %s(%d)",
2083                 strerror(errno), errno);
2084         tap_flow_implicit_flush(pmd, NULL);
2085
2086 error_exit:
2087         if (pmd->nlsk_fd != -1)
2088                 close(pmd->nlsk_fd);
2089         if (pmd->ka_fd != -1)
2090                 close(pmd->ka_fd);
2091         if (pmd->ioctl_sock != -1)
2092                 close(pmd->ioctl_sock);
2093         /* mac_addrs must not be freed alone because part of dev_private */
2094         dev->data->mac_addrs = NULL;
2095         rte_eth_dev_release_port(dev);
2096
2097 error_exit_nodev:
2098         TAP_LOG(ERR, "%s Unable to initialize %s",
2099                 tuntap_name, rte_vdev_device_name(vdev));
2100
2101         return -EINVAL;
2102 }
2103
2104 /* make sure name is a possible Linux network device name */
2105 static bool
2106 is_valid_iface(const char *name)
2107 {
2108         if (*name == '\0')
2109                 return false;
2110
2111         if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
2112                 return false;
2113
2114         while (*name) {
2115                 if (*name == '/' || *name == ':' || isspace(*name))
2116                         return false;
2117                 name++;
2118         }
2119         return true;
2120 }
2121
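/*
 * kvargs handler for the "iface" argument: validate and copy the requested
 * interface name, or fall back to the dtap%d pattern when no value is given.
 */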
2122 static int
2123 set_interface_name(const char *key __rte_unused,
2124                    const char *value,
2125                    void *extra_args)
2126 {
2127         char *name = (char *)extra_args;
2128
2129         if (value) {
2130                 if (!is_valid_iface(value)) {
2131                         TAP_LOG(ERR, "TAP invalid interface name (%s)",
2132                                 value);
2133                         return -1;
2134                 }
2135                 strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
2136         } else {
2137                 /* use dtap%d so the kernel picks the next available index */
2138                 strlcpy(name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
2139         }
2140         return 0;
2141 }
2142
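/*
 * kvargs handler for the "remote" argument: validate and copy the remote
 * interface name.
 */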
2143 static int
2144 set_remote_iface(const char *key __rte_unused,
2145                  const char *value,
2146                  void *extra_args)
2147 {
2148         char *name = (char *)extra_args;
2149
2150         if (value) {
2151                 if (!is_valid_iface(value)) {
2152                         TAP_LOG(ERR, "TAP invalid remote interface name (%s)",
2153                                 value);
2154                         return -1;
2155                 }
2156                 strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
2157         }
2158
2159         return 0;
2160 }
2161
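/*
 * Parse a "xx:xx:xx:xx:xx:xx" string into user_mac and return the number of
 * bytes successfully parsed.
 */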
2162 static int
parse_user_mac(struct rte_ether_addr *user_mac, const char *value)
2164 {
2165         unsigned int index = 0;
2166         char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL;
2167
2168         if (user_mac == NULL || value == NULL)
2169                 return 0;
2170
2171         strlcpy(mac_temp, value, sizeof(mac_temp));
2172         mac_byte = strtok(mac_temp, ":");
2173
2174         while ((mac_byte != NULL) && (index < RTE_ETHER_ADDR_LEN) &&
2175                         (strlen(mac_byte) <= 2) &&
2176                         (strlen(mac_byte) == strspn(mac_byte,
2177                                         ETH_TAP_CMP_MAC_FMT))) {
2178                 user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16);
2179                 mac_byte = strtok(NULL, ":");
2180         }
2181
2182         return index;
2183 }
2184
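/*
 * kvargs handler for the "mac" argument: accept either the "fixed" keyword
 * or an explicit MAC address in xx:xx:xx:xx:xx:xx format.
 */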
2185 static int
2186 set_mac_type(const char *key __rte_unused,
2187              const char *value,
2188              void *extra_args)
2189 {
2190         struct rte_ether_addr *user_mac = extra_args;
2191
2192         if (!value)
2193                 return 0;
2194
2195         if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
2196                 static int iface_idx;
2197
2198                 /* fixed mac = 00:64:74:61:70:('0' + iface_idx) */
2199                 memcpy((char *)user_mac->addr_bytes, "\0dtap",
2200                         RTE_ETHER_ADDR_LEN);
2201                 user_mac->addr_bytes[RTE_ETHER_ADDR_LEN - 1] =
2202                         iface_idx++ + '0';
2203                 goto success;
2204         }
2205
2206         if (parse_user_mac(user_mac, value) != 6)
2207                 goto error;
2208 success:
2209         TAP_LOG(DEBUG, "TAP user MAC param (%s)", value);
2210         return 0;
2211
2212 error:
2213         TAP_LOG(ERR, "TAP user MAC (%s) is not in format (%s|%s)",
2214                 value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT);
2215         return -1;
2216 }
2217
2218 /*
2219  * Open a TUN interface device. The TUN PMD
2220  * 1) sets tap_type to false,
2221  * 2) takes the iface name as an argument,
2222  * 3) sets the link speed to 10G, as the interface is virtual.
2223  */
2224 static int
2225 rte_pmd_tun_probe(struct rte_vdev_device *dev)
2226 {
2227         const char *name, *params;
2228         int ret;
2229         struct rte_kvargs *kvlist = NULL;
2230         char tun_name[RTE_ETH_NAME_MAX_LEN];
2231         char remote_iface[RTE_ETH_NAME_MAX_LEN];
2232         struct rte_eth_dev *eth_dev;
2233
2234         name = rte_vdev_device_name(dev);
2235         params = rte_vdev_device_args(dev);
2236         memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
2237
2238         if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
2239             strlen(params) == 0) {
2240                 eth_dev = rte_eth_dev_attach_secondary(name);
2241                 if (!eth_dev) {
2242                         TAP_LOG(ERR, "Failed to probe %s", name);
2243                         return -1;
2244                 }
2245                 eth_dev->dev_ops = &ops;
2246                 eth_dev->device = &dev->device;
2247                 rte_eth_dev_probing_finish(eth_dev);
2248                 return 0;
2249         }
2250
2251         /* use dtun%d so the kernel picks the next available index */
2252         strlcpy(tun_name, DEFAULT_TUN_NAME "%d", RTE_ETH_NAME_MAX_LEN);
2253
2254         if (params && (params[0] != '\0')) {
2255                 TAP_LOG(DEBUG, "parameters (%s)", params);
2256
2257                 kvlist = rte_kvargs_parse(params, valid_arguments);
2258                 if (kvlist) {
2259                         if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
2260                                 ret = rte_kvargs_process(kvlist,
2261                                         ETH_TAP_IFACE_ARG,
2262                                         &set_interface_name,
2263                                         tun_name);
2264
2265                                 if (ret == -1)
2266                                         goto leave;
2267                         }
2268                 }
2269         }
2270         pmd_link.link_speed = ETH_SPEED_NUM_10G;
2271
2272         TAP_LOG(DEBUG, "Initializing pmd_tun for %s", name);
2273
2274         ret = eth_dev_tap_create(dev, tun_name, remote_iface, NULL,
2275                                  ETH_TUNTAP_TYPE_TUN);
2276
2277 leave:
2278         if (ret == -1) {
2279                 TAP_LOG(ERR, "Failed to create pmd for %s as %s",
2280                         name, tun_name);
2281         }
2282         rte_kvargs_free(kvlist);
2283
2284         return ret;
2285 }
2286
2287 /* Called in the secondary process: request the queue file descriptors from the primary. */
2288 static int
2289 tap_mp_attach_queues(const char *port_name, struct rte_eth_dev *dev)
2290 {
2291         int ret;
2292         struct timespec timeout = {.tv_sec = 1, .tv_nsec = 0};
2293         struct rte_mp_msg request, *reply;
2294         struct rte_mp_reply replies;
2295         struct ipc_queues *request_param = (struct ipc_queues *)request.param;
2296         struct ipc_queues *reply_param;
2297         struct pmd_process_private *process_private = dev->process_private;
2298         int queue, fd_iterator;
2299
2300         /* Prepare the request */
2301         memset(&request, 0, sizeof(request));
2302         strlcpy(request.name, TAP_MP_KEY, sizeof(request.name));
2303         strlcpy(request_param->port_name, port_name,
2304                 sizeof(request_param->port_name));
2305         request.len_param = sizeof(*request_param);
2306         /* Send request and receive reply */
2307         ret = rte_mp_request_sync(&request, &replies, &timeout);
2308         if (ret < 0 || replies.nb_received != 1) {
2309                 TAP_LOG(ERR, "Failed to request queues from primary: %d",
2310                         rte_errno);
2311                 return -1;
2312         }
2313         reply = &replies.msgs[0];
2314         reply_param = (struct ipc_queues *)reply->param;
2315         TAP_LOG(DEBUG, "Received IPC reply for %s", reply_param->port_name);
2316
2317         /* Attach the queues from received file descriptors */
2318         if (reply_param->rxq_count + reply_param->txq_count != reply->num_fds) {
2319                 TAP_LOG(ERR, "Unexpected number of fds received");
                free(reply);
2320                 return -1;
2321         }
2322
2323         dev->data->nb_rx_queues = reply_param->rxq_count;
2324         dev->data->nb_tx_queues = reply_param->txq_count;
2325         fd_iterator = 0;
2326         for (queue = 0; queue < reply_param->rxq_count; queue++)
2327                 process_private->rxq_fds[queue] = reply->fds[fd_iterator++];
2328         for (queue = 0; queue < reply_param->txq_count; queue++)
2329                 process_private->txq_fds[queue] = reply->fds[fd_iterator++];
2330         free(reply);
2331         return 0;
2332 }
2333
2334 /* Send the queue file descriptors from the primary process to the secondary. */
2335 static int
2336 tap_mp_sync_queues(const struct rte_mp_msg *request, const void *peer)
2337 {
2338         struct rte_eth_dev *dev;
2339         struct pmd_process_private *process_private;
2340         struct rte_mp_msg reply;
2341         const struct ipc_queues *request_param =
2342                 (const struct ipc_queues *)request->param;
2343         struct ipc_queues *reply_param =
2344                 (struct ipc_queues *)reply.param;
2345         uint16_t port_id;
2346         int queue;
2347         int ret;
2348
2349         /* Get requested port */
2350         TAP_LOG(DEBUG, "Received IPC request for %s", request_param->port_name);
2351         ret = rte_eth_dev_get_port_by_name(request_param->port_name, &port_id);
2352         if (ret) {
2353                 TAP_LOG(ERR, "Failed to get port id for %s",
2354                         request_param->port_name);
2355                 return -1;
2356         }
2357         dev = &rte_eth_devices[port_id];
2358         process_private = dev->process_private;
2359
2360         /* Fill file descriptors for all queues */
2361         reply.num_fds = 0;
2362         reply_param->rxq_count = 0;
2363         if (dev->data->nb_rx_queues + dev->data->nb_tx_queues >
2364                         RTE_MP_MAX_FD_NUM) {
2365                 TAP_LOG(ERR, "Number of rx/tx queues exceeds max number of fds");
2366                 return -1;
2367         }
2368
2369         for (queue = 0; queue < dev->data->nb_rx_queues; queue++) {
2370                 reply.fds[reply.num_fds++] = process_private->rxq_fds[queue];
2371                 reply_param->rxq_count++;
2372         }
2373         RTE_ASSERT(reply_param->rxq_count == dev->data->nb_rx_queues);
2374
2375         reply_param->txq_count = 0;
2376         for (queue = 0; queue < dev->data->nb_tx_queues; queue++) {
2377                 reply.fds[reply.num_fds++] = process_private->txq_fds[queue];
2378                 reply_param->txq_count++;
2379         }
2380         RTE_ASSERT(reply_param->txq_count == dev->data->nb_tx_queues);
2381
2382         /* Send reply */
2383         strlcpy(reply.name, request->name, sizeof(reply.name));
2384         strlcpy(reply_param->port_name, request_param->port_name,
2385                 sizeof(reply_param->port_name));
2386         reply.len_param = sizeof(*reply_param);
2387         if (rte_mp_reply(&reply, peer) < 0) {
2388                 TAP_LOG(ERR, "Failed to reply an IPC request to sync queues");
2389                 return -1;
2390         }
2391         return 0;
2392 }
2393
2394 /* Open a TAP interface device: vdev probe callback for net_tap. */
2396 static int
2397 rte_pmd_tap_probe(struct rte_vdev_device *dev)
2398 {
2399         const char *name, *params;
2400         int ret;
2401         struct rte_kvargs *kvlist = NULL;
2402         int speed;
2403         char tap_name[RTE_ETH_NAME_MAX_LEN];
2404         char remote_iface[RTE_ETH_NAME_MAX_LEN];
2405         struct rte_ether_addr user_mac = { .addr_bytes = {0} };
2406         struct rte_eth_dev *eth_dev;
2407         int tap_devices_count_increased = 0;
2408
2409         name = rte_vdev_device_name(dev);
2410         params = rte_vdev_device_args(dev);
2411
2412         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
2413                 eth_dev = rte_eth_dev_attach_secondary(name);
2414                 if (!eth_dev) {
2415                         TAP_LOG(ERR, "Failed to probe %s", name);
2416                         return -1;
2417                 }
2418                 eth_dev->dev_ops = &ops;
2419                 eth_dev->device = &dev->device;
2420                 eth_dev->rx_pkt_burst = pmd_rx_burst;
2421                 eth_dev->tx_pkt_burst = pmd_tx_burst;
2422                 if (!rte_eal_primary_proc_alive(NULL)) {
2423                         TAP_LOG(ERR, "Primary process is missing");
2424                         return -1;
2425                 }
2426                 eth_dev->process_private = (struct pmd_process_private *)
2427                         rte_zmalloc_socket(name,
2428                                 sizeof(struct pmd_process_private),
2429                                 RTE_CACHE_LINE_SIZE,
2430                                 eth_dev->device->numa_node);
2431                 if (eth_dev->process_private == NULL) {
2432                         TAP_LOG(ERR,
2433                                 "Failed to alloc memory for process private");
2434                         return -1;
2435                 }
2436
2437                 ret = tap_mp_attach_queues(name, eth_dev);
2438                 if (ret != 0)
2439                         return -1;
2440                 rte_eth_dev_probing_finish(eth_dev);
2441                 return 0;
2442         }
2443
2444         speed = ETH_SPEED_NUM_10G;
2445
2446         /* use dtap%d so the kernel picks the next available index */
2447         strlcpy(tap_name, DEFAULT_TAP_NAME "%d", RTE_ETH_NAME_MAX_LEN);
2448         memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
2449
2450         if (params && (params[0] != '\0')) {
2451                 TAP_LOG(DEBUG, "parameters (%s)", params);
2452
2453                 kvlist = rte_kvargs_parse(params, valid_arguments);
2454                 if (kvlist) {
2455                         if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
2456                                 ret = rte_kvargs_process(kvlist,
2457                                                          ETH_TAP_IFACE_ARG,
2458                                                          &set_interface_name,
2459                                                          tap_name);
2460                                 if (ret == -1)
2461                                         goto leave;
2462                         }
2463
2464                         if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
2465                                 ret = rte_kvargs_process(kvlist,
2466                                                          ETH_TAP_REMOTE_ARG,
2467                                                          &set_remote_iface,
2468                                                          remote_iface);
2469                                 if (ret == -1)
2470                                         goto leave;
2471                         }
2472
2473                         if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
2474                                 ret = rte_kvargs_process(kvlist,
2475                                                          ETH_TAP_MAC_ARG,
2476                                                          &set_mac_type,
2477                                                          &user_mac);
2478                                 if (ret == -1)
2479                                         goto leave;
2480                         }
2481                 }
2482         }
2483         pmd_link.link_speed = speed;
2484
2485         TAP_LOG(DEBUG, "Initializing pmd_tap for %s", name);
2486
2487         /* Register IPC feed callback */
2488         if (!tap_devices_count) {
2489                 ret = rte_mp_action_register(TAP_MP_KEY, tap_mp_sync_queues);
2490                 if (ret < 0 && rte_errno != ENOTSUP) {
2491                         TAP_LOG(ERR, "tap: Failed to register IPC callback: %s",
2492                                 strerror(rte_errno));
2493                         goto leave;
2494                 }
2495         }
2496         tap_devices_count++;
2497         tap_devices_count_increased = 1;
2498         ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac,
2499                 ETH_TUNTAP_TYPE_TAP);
2500
2501 leave:
2502         if (ret == -1) {
2503                 TAP_LOG(ERR, "Failed to create pmd for %s as %s",
2504                         name, tap_name);
2505                 if (tap_devices_count_increased == 1) {
2506                         if (tap_devices_count == 1)
2507                                 rte_mp_action_unregister(TAP_MP_KEY);
2508                         tap_devices_count--;
2509                 }
2510         }
2511         rte_kvargs_free(kvlist);
2512
2513         return ret;
2514 }
2515
2516 /* Detach a TUN/TAP device: vdev remove callback for net_tap and net_tun. */
2518 static int
2519 rte_pmd_tap_remove(struct rte_vdev_device *dev)
2520 {
2521         struct rte_eth_dev *eth_dev = NULL;
2522
2523         /* find the ethdev entry */
2524         eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
2525         if (!eth_dev)
2526                 return 0;
2527
2528         tap_dev_close(eth_dev);
2529         rte_eth_dev_release_port(eth_dev);
2530
2531         return 0;
2532 }
2533
2534 static struct rte_vdev_driver pmd_tun_drv = {
2535         .probe = rte_pmd_tun_probe,
2536         .remove = rte_pmd_tap_remove,
2537 };
2538
2539 static struct rte_vdev_driver pmd_tap_drv = {
2540         .probe = rte_pmd_tap_probe,
2541         .remove = rte_pmd_tap_remove,
2542 };
2543
2544 RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
2545 RTE_PMD_REGISTER_VDEV(net_tun, pmd_tun_drv);
2546 RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
2547 RTE_PMD_REGISTER_PARAM_STRING(net_tun,
2548                               ETH_TAP_IFACE_ARG "=<string> ");
2549 RTE_PMD_REGISTER_PARAM_STRING(net_tap,
2550                               ETH_TAP_IFACE_ARG "=<string> "
2551                               ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
2552                               ETH_TAP_REMOTE_ARG "=<string>");
2553 RTE_LOG_REGISTER_DEFAULT(tap_logtype, NOTICE);