net/tap: fix icc build
[dpdk.git] / drivers / net / tap / rte_eth_tap.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <rte_atomic.h>
6 #include <rte_branch_prediction.h>
7 #include <rte_byteorder.h>
8 #include <rte_common.h>
9 #include <rte_mbuf.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_vdev.h>
12 #include <rte_malloc.h>
13 #include <rte_bus_vdev.h>
14 #include <rte_kvargs.h>
15 #include <rte_net.h>
16 #include <rte_debug.h>
17 #include <rte_ip.h>
18 #include <rte_string_fns.h>
19
20 #include <sys/types.h>
21 #include <sys/stat.h>
22 #include <sys/socket.h>
23 #include <sys/ioctl.h>
24 #include <sys/utsname.h>
25 #include <sys/mman.h>
26 #include <errno.h>
27 #include <signal.h>
28 #include <stdbool.h>
29 #include <stdint.h>
30 #include <sys/uio.h>
31 #include <unistd.h>
32 #include <arpa/inet.h>
33 #include <net/if.h>
34 #include <linux/if_tun.h>
35 #include <linux/if_ether.h>
36 #include <fcntl.h>
37
38 #include <rte_eth_tap.h>
39 #include <tap_flow.h>
40 #include <tap_netlink.h>
41 #include <tap_tcmsgs.h>
42
43 /* Linux based path to the TUN device */
44 #define TUN_TAP_DEV_PATH        "/dev/net/tun"
45 #define DEFAULT_TAP_NAME        "dtap"
46 #define DEFAULT_TUN_NAME        "dtun"
47
48 #define ETH_TAP_IFACE_ARG       "iface"
49 #define ETH_TAP_REMOTE_ARG      "remote"
50 #define ETH_TAP_MAC_ARG         "mac"
51 #define ETH_TAP_MAC_FIXED       "fixed"
52
53 #define ETH_TAP_USR_MAC_FMT     "xx:xx:xx:xx:xx:xx"
54 #define ETH_TAP_CMP_MAC_FMT     "0123456789ABCDEFabcdef"
55 #define ETH_TAP_MAC_ARG_FMT     ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT
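/*
 * Typical vdev arguments built from the kvargs above (illustrative only;
 * the exact vdev name is whatever this PMD registers, assumed here to be
 * "net_tap"/"net_tun"):
 *   --vdev=net_tap0,iface=tap0,mac=fixed
 *   --vdev=net_tap0,iface=tap0,remote=eth0
 */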
56
57 static struct rte_vdev_driver pmd_tap_drv;
58 static struct rte_vdev_driver pmd_tun_drv;
59
60 static const char *valid_arguments[] = {
61         ETH_TAP_IFACE_ARG,
62         ETH_TAP_REMOTE_ARG,
63         ETH_TAP_MAC_ARG,
64         NULL
65 };
66
67 static int tap_unit;
68 static int tun_unit;
69
70 static int tap_type;
71 static char tuntap_name[8];
72
73 static volatile uint32_t tap_trigger;   /* Rx trigger */
74
75 static struct rte_eth_link pmd_link = {
76         .link_speed = ETH_SPEED_NUM_10G,
77         .link_duplex = ETH_LINK_FULL_DUPLEX,
78         .link_status = ETH_LINK_DOWN,
79         .link_autoneg = ETH_LINK_AUTONEG
80 };
81
82 static void
83 tap_trigger_cb(int sig __rte_unused)
84 {
85         /* Valid trigger values are nonzero */
86         tap_trigger = (tap_trigger + 1) | 0x80000000;
87 }
88
89 /* Specifies on what netdevices the ioctl should be applied */
90 enum ioctl_mode {
91         LOCAL_AND_REMOTE,
92         LOCAL_ONLY,
93         REMOTE_ONLY,
94 };
95
96 static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
97
98 /* Tun/Tap allocation routine
99  *
100  * name is the name of the interface to use, unless NULL to take the host
101  * supplied name.
102  */
103 static int
104 tun_alloc(struct pmd_internals *pmd)
105 {
106         struct ifreq ifr;
107 #ifdef IFF_MULTI_QUEUE
108         unsigned int features;
109 #endif
110         int fd;
111
112         memset(&ifr, 0, sizeof(struct ifreq));
113
114         /*
115          * Do not set IFF_NO_PI as the packet information header will be needed
116          * to check if a received packet has been truncated.
117          */
118         ifr.ifr_flags = (tap_type) ? IFF_TAP : IFF_TUN | IFF_POINTOPOINT;
119         snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);
120
121         RTE_LOG(DEBUG, PMD, "ifr_name '%s'\n", ifr.ifr_name);
122
123         fd = open(TUN_TAP_DEV_PATH, O_RDWR);
124         if (fd < 0) {
125                 RTE_LOG(ERR, PMD, "Unable to create %s interface\n",
126                                 tuntap_name);
127                 goto error;
128         }
129
130 #ifdef IFF_MULTI_QUEUE
131         /* Grab the TUN features to verify we can work multi-queue */
132         if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
133                 RTE_LOG(ERR, PMD, "%s unable to get TUN/TAP features\n",
134                                 tuntap_name);
135                 goto error;
136         }
137         RTE_LOG(DEBUG, PMD, "%s Features %08x\n", tuntap_name, features);
138
139         if (features & IFF_MULTI_QUEUE) {
140                 RTE_LOG(DEBUG, PMD, "  Multi-queue support for %d queues\n",
141                         RTE_PMD_TAP_MAX_QUEUES);
142                 ifr.ifr_flags |= IFF_MULTI_QUEUE;
143         } else
144 #endif
145         {
146                 ifr.ifr_flags |= IFF_ONE_QUEUE;
147                 RTE_LOG(DEBUG, PMD, "  Single queue support only\n");
148         }
149
150         /* Set the TUN/TAP configuration and set the name if needed */
151         if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
152                 RTE_LOG(WARNING, PMD,
153                         "Unable to set TUNSETIFF for %s\n",
154                         ifr.ifr_name);
155                 perror("TUNSETIFF");
156                 goto error;
157         }
158
159         /* Always set the file descriptor to non-blocking */
160         if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
161                 RTE_LOG(WARNING, PMD,
162                         "Unable to set %s to nonblocking\n",
163                         ifr.ifr_name);
164                 perror("F_SETFL, NONBLOCK");
165                 goto error;
166         }
167
168         /* Set up trigger to optimize empty Rx bursts */
169         errno = 0;
170         do {
171                 struct sigaction sa;
172                 int flags = fcntl(fd, F_GETFL);
173
174                 if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
175                         break;
176                 if (sa.sa_handler != tap_trigger_cb) {
177                         /*
178                          * Make sure SIGIO is not already taken. This is done
179                          * as late as possible to leave the application a
180                          * chance to set up its own signal handler first.
181                          */
182                         if (sa.sa_handler != SIG_IGN &&
183                             sa.sa_handler != SIG_DFL) {
184                                 errno = EBUSY;
185                                 break;
186                         }
187                         sa = (struct sigaction){
188                                 .sa_flags = SA_RESTART,
189                                 .sa_handler = tap_trigger_cb,
190                         };
191                         if (sigaction(SIGIO, &sa, NULL) == -1)
192                                 break;
193                 }
194                 /* Enable SIGIO on file descriptor */
195                 fcntl(fd, F_SETFL, flags | O_ASYNC);
196                 fcntl(fd, F_SETOWN, getpid());
197         } while (0);
198         if (errno) {
199                 /* Disable trigger globally in case of error */
200                 tap_trigger = 0;
201                 RTE_LOG(WARNING, PMD, "Rx trigger disabled: %s\n",
202                         strerror(errno));
203         }
204
205         return fd;
206
207 error:
208         if (fd > 0)
209                 close(fd);
210         return -1;
211 }
212
213 static void
214 tap_verify_csum(struct rte_mbuf *mbuf)
215 {
216         uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
217         uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
218         uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
219         unsigned int l2_len = sizeof(struct ether_hdr);
220         unsigned int l3_len;
221         uint16_t cksum = 0;
222         void *l3_hdr;
223         void *l4_hdr;
224
225         if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
226                 l2_len += 4;
227         else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
228                 l2_len += 8;
229         /* Don't verify checksum for packets with discontinuous L2 header */
230         if (unlikely(l2_len + sizeof(struct ipv4_hdr) >
231                      rte_pktmbuf_data_len(mbuf)))
232                 return;
233         l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
234         if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
235                 struct ipv4_hdr *iph = l3_hdr;
236
237                 /* ihl contains the number of 4-byte words in the header */
238                 l3_len = 4 * (iph->version_ihl & 0xf);
239                 if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
240                         return;
241
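                /*
                 * rte_raw_cksum() over a valid IPv4 header (checksum field
                 * included) yields 0xffff, so its complement is 0; any
                 * non-zero value below means the header checksum is bad.
                 */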
242                 cksum = ~rte_raw_cksum(iph, l3_len);
243                 mbuf->ol_flags |= cksum ?
244                         PKT_RX_IP_CKSUM_BAD :
245                         PKT_RX_IP_CKSUM_GOOD;
246         } else if (l3 == RTE_PTYPE_L3_IPV6) {
247                 l3_len = sizeof(struct ipv6_hdr);
248         } else {
249                 /* IPv6 extensions are not supported */
250                 return;
251         }
252         if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
253                 l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len);
254                 /* Don't verify checksum for multi-segment packets. */
255                 if (mbuf->nb_segs > 1)
256                         return;
257                 if (l3 == RTE_PTYPE_L3_IPV4)
258                         cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
259                 else if (l3 == RTE_PTYPE_L3_IPV6)
260                         cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
261                 mbuf->ol_flags |= cksum ?
262                         PKT_RX_L4_CKSUM_BAD :
263                         PKT_RX_L4_CKSUM_GOOD;
264         }
265 }
266
267 static uint64_t
268 tap_rx_offload_get_port_capa(void)
269 {
270         /*
271          * In order to support legacy apps,
272          * report capabilities also as port capabilities.
273          */
274         return DEV_RX_OFFLOAD_SCATTER |
275                DEV_RX_OFFLOAD_IPV4_CKSUM |
276                DEV_RX_OFFLOAD_UDP_CKSUM |
277                DEV_RX_OFFLOAD_TCP_CKSUM |
278                DEV_RX_OFFLOAD_CRC_STRIP;
279 }
280
281 static uint64_t
282 tap_rx_offload_get_queue_capa(void)
283 {
284         return DEV_RX_OFFLOAD_SCATTER |
285                DEV_RX_OFFLOAD_IPV4_CKSUM |
286                DEV_RX_OFFLOAD_UDP_CKSUM |
287                DEV_RX_OFFLOAD_TCP_CKSUM |
288                DEV_RX_OFFLOAD_CRC_STRIP;
289 }
290
291 static bool
292 tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
293 {
294         uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
295         uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
296         uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();
297
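        /*
         * First reject offloads that neither the queue nor the port supports,
         * then make sure the queue offloads do not conflict with the
         * port-level configuration.
         */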
298         if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
299             offloads)
300                 return false;
301         if ((port_offloads ^ offloads) & port_supp_offloads)
302                 return false;
303         return true;
304 }
305
306 /* Callback to handle the rx burst of packets to the correct interface and
307  * file descriptor(s) in a multi-queue setup.
308  */
309 static uint16_t
310 pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
311 {
312         struct rx_queue *rxq = queue;
313         uint16_t num_rx;
314         unsigned long num_rx_bytes = 0;
315         uint32_t trigger = tap_trigger;
316
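        /*
         * tap_trigger is bumped by the SIGIO handler when the fd becomes
         * readable; if it has not changed since the last burst seen on this
         * queue, skip the readv() syscall entirely. A zero trigger means the
         * mechanism is disabled and reads always proceed.
         */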
317         if (trigger == rxq->trigger_seen)
318                 return 0;
319         if (trigger)
320                 rxq->trigger_seen = trigger;
321         rte_compiler_barrier();
322         for (num_rx = 0; num_rx < nb_pkts; ) {
323                 struct rte_mbuf *mbuf = rxq->pool;
324                 struct rte_mbuf *seg = NULL;
325                 struct rte_mbuf *new_tail = NULL;
326                 uint16_t data_off = rte_pktmbuf_headroom(mbuf);
327                 int len;
328
329                 len = readv(rxq->fd, *rxq->iovecs,
330                             1 +
331                             (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
332                              rxq->nb_rx_desc : 1));
333                 if (len < (int)sizeof(struct tun_pi))
334                         break;
335
336                 /* Packet couldn't fit in the provided mbuf */
337                 if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
338                         rxq->stats.ierrors++;
339                         continue;
340                 }
341
342                 len -= sizeof(struct tun_pi);
343
344                 mbuf->pkt_len = len;
345                 mbuf->port = rxq->in_port;
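                /*
                 * readv() filled the pre-allocated chain headed by rxq->pool.
                 * Hand those segments to the application, allocate replacement
                 * buffers that become the new head of the pool, and repoint
                 * the iovec entries at the replacements for the next readv().
                 */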
346                 while (1) {
347                         struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);
348
349                         if (unlikely(!buf)) {
350                                 rxq->stats.rx_nombuf++;
351                                 /* No new buf has been allocated: do nothing */
352                                 if (!new_tail || !seg)
353                                         goto end;
354
355                                 seg->next = NULL;
356                                 rte_pktmbuf_free(mbuf);
357
358                                 goto end;
359                         }
360                         seg = seg ? seg->next : mbuf;
361                         if (rxq->pool == mbuf)
362                                 rxq->pool = buf;
363                         if (new_tail)
364                                 new_tail->next = buf;
365                         new_tail = buf;
366                         new_tail->next = seg->next;
367
368                         /* iovecs[0] is reserved for packet info (pi) */
369                         (*rxq->iovecs)[mbuf->nb_segs].iov_len =
370                                 buf->buf_len - data_off;
371                         (*rxq->iovecs)[mbuf->nb_segs].iov_base =
372                                 (char *)buf->buf_addr + data_off;
373
374                         seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
375                         seg->data_off = data_off;
376
377                         len -= seg->data_len;
378                         if (len <= 0)
379                                 break;
380                         mbuf->nb_segs++;
381                         /* First segment has headroom, not the others */
382                         data_off = 0;
383                 }
384                 seg->next = NULL;
385                 mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
386                                                       RTE_PTYPE_ALL_MASK);
387                 if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
388                         tap_verify_csum(mbuf);
389
390                 /* account for the receive frame */
391                 bufs[num_rx++] = mbuf;
392                 num_rx_bytes += mbuf->pkt_len;
393         }
394 end:
395         rxq->stats.ipackets += num_rx;
396         rxq->stats.ibytes += num_rx_bytes;
397
398         return num_rx;
399 }
400
401 static uint64_t
402 tap_tx_offload_get_port_capa(void)
403 {
404         /*
405          * In order to support legacy apps,
406          * report capabilities also as port capabilities.
407          */
408         return DEV_TX_OFFLOAD_MULTI_SEGS |
409                DEV_TX_OFFLOAD_IPV4_CKSUM |
410                DEV_TX_OFFLOAD_UDP_CKSUM |
411                DEV_TX_OFFLOAD_TCP_CKSUM;
412 }
413
414 static uint64_t
415 tap_tx_offload_get_queue_capa(void)
416 {
417         return DEV_TX_OFFLOAD_MULTI_SEGS |
418                DEV_TX_OFFLOAD_IPV4_CKSUM |
419                DEV_TX_OFFLOAD_UDP_CKSUM |
420                DEV_TX_OFFLOAD_TCP_CKSUM;
421 }
422
423 static bool
424 tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
425 {
426         uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
427         uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
428         uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
429
430         if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
431             offloads)
432                 return false;
433         /* Verify we have no conflict with port offloads */
434         if ((port_offloads ^ offloads) & port_supp_offloads)
435                 return false;
436         return true;
437 }
438
439 static void
440 tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
441                unsigned int l3_len)
442 {
443         void *l3_hdr = packet + l2_len;
444
445         if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
446                 struct ipv4_hdr *iph = l3_hdr;
447                 uint16_t cksum;
448
449                 iph->hdr_checksum = 0;
450                 cksum = rte_raw_cksum(iph, l3_len);
451                 iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
452         }
453         if (ol_flags & PKT_TX_L4_MASK) {
454                 uint16_t l4_len;
455                 uint32_t cksum;
456                 uint16_t *l4_cksum;
457                 void *l4_hdr;
458
459                 l4_hdr = packet + l2_len + l3_len;
460                 if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
461                         l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
462                 else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
463                         l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
464                 else
465                         return;
466                 *l4_cksum = 0;
467                 if (ol_flags & PKT_TX_IPV4) {
468                         struct ipv4_hdr *iph = l3_hdr;
469
470                         l4_len = rte_be_to_cpu_16(iph->total_length) - l3_len;
471                         cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
472                 } else {
473                         struct ipv6_hdr *ip6h = l3_hdr;
474
475                         /* payload_len does not include ext headers */
476                         l4_len = rte_be_to_cpu_16(ip6h->payload_len) -
477                                 l3_len + sizeof(struct ipv6_hdr);
478                         cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
479                 }
480                 cksum += rte_raw_cksum(l4_hdr, l4_len);
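                /*
                 * Fold the 32-bit sum into 16 bits and take the one's
                 * complement; a computed checksum of 0 is transmitted as
                 * 0xffff, as required for UDP.
                 */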
481                 cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
482                 cksum = (~cksum) & 0xffff;
483                 if (cksum == 0)
484                         cksum = 0xffff;
485                 *l4_cksum = cksum;
486         }
487 }
488
489 /* Callback to handle sending packets from the tap interface
490  */
491 static uint16_t
492 pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
493 {
494         struct tx_queue *txq = queue;
495         uint16_t num_tx = 0;
496         unsigned long num_tx_bytes = 0;
497         uint32_t max_size;
498         int i;
499
500         if (unlikely(nb_pkts == 0))
501                 return 0;
502
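        /* Max frame: MTU + Ethernet header + CRC + a possible VLAN tag */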
503         max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
504         for (i = 0; i < nb_pkts; i++) {
505                 struct rte_mbuf *mbuf = bufs[num_tx];
506                 struct iovec iovecs[mbuf->nb_segs + 1];
507                 struct tun_pi pi = { .flags = 0, .proto = 0x00 };
508                 struct rte_mbuf *seg = mbuf;
509                 char m_copy[mbuf->data_len];
510                 int n;
511                 int j;
512
513                 /* stats.errs will be incremented */
514                 if (rte_pktmbuf_pkt_len(mbuf) > max_size)
515                         break;
516
517                 /*
518                  * TUN and TAP are created with IFF_NO_PI disabled.
519                  * For the TUN PMD this is mandatory, as the tun_pi fields
520                  * are used by the kernel's tun.c to determine whether the
521                  * packet is IP or non-IP.
522                  *
523                  * The logic fetches the first byte of data from the mbuf
524                  * and checks whether it is IPv4 or IPv6. If neither matches,
525                  * the default value 0x00 is used for the protocol field.
526                  */
527                 char *buff_data = rte_pktmbuf_mtod(seg, void *);
528                 j = (*buff_data & 0xf0);
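                /*
                 * tun_pi.proto expects the ethertype in network byte order:
                 * 0x0008 is ETH_P_IP (0x0800) and 0xdd86 is ETH_P_IPV6
                 * (0x86dd) as written by a little-endian host.
                 */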
529                 pi.proto = (j == 0x40) ? 0x0008 :
530                                 (j == 0x60) ? 0xdd86 : 0x00;
531
532                 iovecs[0].iov_base = &pi;
533                 iovecs[0].iov_len = sizeof(pi);
534                 for (j = 1; j <= mbuf->nb_segs; j++) {
535                         iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
536                         iovecs[j].iov_base =
537                                 rte_pktmbuf_mtod(seg, void *);
538                         seg = seg->next;
539                 }
540                 if (txq->csum &&
541                     ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
542                      (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
543                      (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
544                         /* Support only packets with all data in the same seg */
545                         if (mbuf->nb_segs > 1)
546                                 break;
547                         /* To change checksums, work on a copy of data. */
548                         rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
549                                    rte_pktmbuf_data_len(mbuf));
550                         tap_tx_offload(m_copy, mbuf->ol_flags,
551                                        mbuf->l2_len, mbuf->l3_len);
552                         iovecs[1].iov_base = m_copy;
553                 }
554                 /* copy the tx frame data */
555                 n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
556                 if (n <= 0)
557                         break;
558
559                 num_tx++;
560                 num_tx_bytes += mbuf->pkt_len;
561                 rte_pktmbuf_free(mbuf);
562         }
563
564         txq->stats.opackets += num_tx;
565         txq->stats.errs += nb_pkts - num_tx;
566         txq->stats.obytes += num_tx_bytes;
567
568         return num_tx;
569 }
570
571 static const char *
572 tap_ioctl_req2str(unsigned long request)
573 {
574         switch (request) {
575         case SIOCSIFFLAGS:
576                 return "SIOCSIFFLAGS";
577         case SIOCGIFFLAGS:
578                 return "SIOCGIFFLAGS";
579         case SIOCGIFHWADDR:
580                 return "SIOCGIFHWADDR";
581         case SIOCSIFHWADDR:
582                 return "SIOCSIFHWADDR";
583         case SIOCSIFMTU:
584                 return "SIOCSIFMTU";
585         }
586         return "UNKNOWN";
587 }
588
589 static int
590 tap_ioctl(struct pmd_internals *pmd, unsigned long request,
591           struct ifreq *ifr, int set, enum ioctl_mode mode)
592 {
593         short req_flags = ifr->ifr_flags;
594         int remote = pmd->remote_if_index &&
595                 (mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);
596
597         if (!pmd->remote_if_index && mode == REMOTE_ONLY)
598                 return 0;
599         /*
600          * If there is a remote netdevice, apply ioctl on it, then apply it on
601          * the tap netdevice.
602          */
603 apply:
604         if (remote)
605                 snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->remote_iface);
606         else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
607                 snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->name);
608         switch (request) {
609         case SIOCSIFFLAGS:
610                 /* fetch current flags to leave other flags untouched */
611                 if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
612                         goto error;
613                 if (set)
614                         ifr->ifr_flags |= req_flags;
615                 else
616                         ifr->ifr_flags &= ~req_flags;
617                 break;
618         case SIOCGIFFLAGS:
619         case SIOCGIFHWADDR:
620         case SIOCSIFHWADDR:
621         case SIOCSIFMTU:
622                 break;
623         default:
624                 RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
625                         pmd->name);
626                 return -EINVAL;
627         }
628         if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
629                 goto error;
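        /*
         * The first pass targets the remote netdevice (if any); remote-- makes
         * the second pass apply the same request to the local tap netdevice.
         */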
630         if (remote-- && mode == LOCAL_AND_REMOTE)
631                 goto apply;
632         return 0;
633
634 error:
635         RTE_LOG(DEBUG, PMD, "%s: %s(%s) failed: %s(%d)\n", ifr->ifr_name,
636                 __func__, tap_ioctl_req2str(request), strerror(errno), errno);
637         return -errno;
638 }
639
640 static int
641 tap_link_set_down(struct rte_eth_dev *dev)
642 {
643         struct pmd_internals *pmd = dev->data->dev_private;
644         struct ifreq ifr = { .ifr_flags = IFF_UP };
645
646         dev->data->dev_link.link_status = ETH_LINK_DOWN;
647         return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
648 }
649
650 static int
651 tap_link_set_up(struct rte_eth_dev *dev)
652 {
653         struct pmd_internals *pmd = dev->data->dev_private;
654         struct ifreq ifr = { .ifr_flags = IFF_UP };
655
656         dev->data->dev_link.link_status = ETH_LINK_UP;
657         return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
658 }
659
660 static int
661 tap_dev_start(struct rte_eth_dev *dev)
662 {
663         int err;
664
665         err = tap_intr_handle_set(dev, 1);
666         if (err)
667                 return err;
668         return tap_link_set_up(dev);
669 }
670
671 /* This function gets called when the current port gets stopped.
672  */
673 static void
674 tap_dev_stop(struct rte_eth_dev *dev)
675 {
676         tap_intr_handle_set(dev, 0);
677         tap_link_set_down(dev);
678 }
679
680 static int
681 tap_dev_configure(struct rte_eth_dev *dev)
682 {
683         uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa();
684         uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
685
686         if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
687                 rte_errno = ENOTSUP;
688                 RTE_LOG(ERR, PMD,
689                         "Some Tx offloads are not supported "
690                         "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
691                         tx_offloads, supp_tx_offloads);
692                 return -rte_errno;
693         }
694         if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
695                 RTE_LOG(ERR, PMD,
696                         "%s: number of rx queues %d exceeds max num of queues %d\n",
697                         dev->device->name,
698                         dev->data->nb_rx_queues,
699                         RTE_PMD_TAP_MAX_QUEUES);
700                 return -1;
701         }
702         if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
703                 RTE_LOG(ERR, PMD,
704                         "%s: number of tx queues %d exceeds max num of queues %d\n",
705                         dev->device->name,
706                         dev->data->nb_tx_queues,
707                         RTE_PMD_TAP_MAX_QUEUES);
708                 return -1;
709         }
710
711         RTE_LOG(INFO, PMD, "%s: %p: TX configured queues number: %u\n",
712              dev->device->name, (void *)dev, dev->data->nb_tx_queues);
713
714         RTE_LOG(INFO, PMD, "%s: %p: RX configured queues number: %u\n",
715              dev->device->name, (void *)dev, dev->data->nb_rx_queues);
716
717         return 0;
718 }
719
720 static uint32_t
721 tap_dev_speed_capa(void)
722 {
723         uint32_t speed = pmd_link.link_speed;
724         uint32_t capa = 0;
725
726         if (speed >= ETH_SPEED_NUM_10M)
727                 capa |= ETH_LINK_SPEED_10M;
728         if (speed >= ETH_SPEED_NUM_100M)
729                 capa |= ETH_LINK_SPEED_100M;
730         if (speed >= ETH_SPEED_NUM_1G)
731                 capa |= ETH_LINK_SPEED_1G;
732         if (speed >= ETH_SPEED_NUM_2_5G)
733                 capa |= ETH_LINK_SPEED_2_5G;
734         if (speed >= ETH_SPEED_NUM_5G)
735                 capa |= ETH_LINK_SPEED_5G;
736         if (speed >= ETH_SPEED_NUM_10G)
737                 capa |= ETH_LINK_SPEED_10G;
738         if (speed >= ETH_SPEED_NUM_20G)
739                 capa |= ETH_LINK_SPEED_20G;
740         if (speed >= ETH_SPEED_NUM_25G)
741                 capa |= ETH_LINK_SPEED_25G;
742         if (speed >= ETH_SPEED_NUM_40G)
743                 capa |= ETH_LINK_SPEED_40G;
744         if (speed >= ETH_SPEED_NUM_50G)
745                 capa |= ETH_LINK_SPEED_50G;
746         if (speed >= ETH_SPEED_NUM_56G)
747                 capa |= ETH_LINK_SPEED_56G;
748         if (speed >= ETH_SPEED_NUM_100G)
749                 capa |= ETH_LINK_SPEED_100G;
750
751         return capa;
752 }
753
754 static void
755 tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
756 {
757         struct pmd_internals *internals = dev->data->dev_private;
758
759         dev_info->if_index = internals->if_index;
760         dev_info->max_mac_addrs = 1;
761         dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
762         dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
763         dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
764         dev_info->min_rx_bufsize = 0;
765         dev_info->speed_capa = tap_dev_speed_capa();
766         dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
767         dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
768                                     dev_info->rx_queue_offload_capa;
769         dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
770         dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
771                                     dev_info->tx_queue_offload_capa;
772 }
773
774 static int
775 tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
776 {
777         unsigned int i, imax;
778         unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
779         unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
780         unsigned long rx_nombuf = 0, ierrors = 0;
781         const struct pmd_internals *pmd = dev->data->dev_private;
782
783         /* rx queue statistics */
784         imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
785                 dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
786         for (i = 0; i < imax; i++) {
787                 tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
788                 tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
789                 rx_total += tap_stats->q_ipackets[i];
790                 rx_bytes_total += tap_stats->q_ibytes[i];
791                 rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
792                 ierrors += pmd->rxq[i].stats.ierrors;
793         }
794
795         /* tx queue statistics */
796         imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
797                 dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
798
799         for (i = 0; i < imax; i++) {
800                 tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
801                 tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
802                 tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
803                 tx_total += tap_stats->q_opackets[i];
804                 tx_err_total += tap_stats->q_errors[i];
805                 tx_bytes_total += tap_stats->q_obytes[i];
806         }
807
808         tap_stats->ipackets = rx_total;
809         tap_stats->ibytes = rx_bytes_total;
810         tap_stats->ierrors = ierrors;
811         tap_stats->rx_nombuf = rx_nombuf;
812         tap_stats->opackets = tx_total;
813         tap_stats->oerrors = tx_err_total;
814         tap_stats->obytes = tx_bytes_total;
815         return 0;
816 }
817
818 static void
819 tap_stats_reset(struct rte_eth_dev *dev)
820 {
821         int i;
822         struct pmd_internals *pmd = dev->data->dev_private;
823
824         for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
825                 pmd->rxq[i].stats.ipackets = 0;
826                 pmd->rxq[i].stats.ibytes = 0;
827                 pmd->rxq[i].stats.ierrors = 0;
828                 pmd->rxq[i].stats.rx_nombuf = 0;
829
830                 pmd->txq[i].stats.opackets = 0;
831                 pmd->txq[i].stats.errs = 0;
832                 pmd->txq[i].stats.obytes = 0;
833         }
834 }
835
836 static void
837 tap_dev_close(struct rte_eth_dev *dev)
838 {
839         int i;
840         struct pmd_internals *internals = dev->data->dev_private;
841
842         tap_link_set_down(dev);
843         tap_flow_flush(dev, NULL);
844         tap_flow_implicit_flush(internals, NULL);
845
846         for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
847                 if (internals->rxq[i].fd != -1) {
848                         close(internals->rxq[i].fd);
849                         internals->rxq[i].fd = -1;
850                 }
851                 if (internals->txq[i].fd != -1) {
852                         close(internals->txq[i].fd);
853                         internals->txq[i].fd = -1;
854                 }
855         }
856
857         if (internals->remote_if_index) {
858                 /* Restore initial remote state */
859                 ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
860                                 &internals->remote_initial_flags);
861         }
862 }
863
864 static void
865 tap_rx_queue_release(void *queue)
866 {
867         struct rx_queue *rxq = queue;
868
869         if (rxq && (rxq->fd > 0)) {
870                 close(rxq->fd);
871                 rxq->fd = -1;
872                 rte_pktmbuf_free(rxq->pool);
873                 rte_free(rxq->iovecs);
874                 rxq->pool = NULL;
875                 rxq->iovecs = NULL;
876         }
877 }
878
879 static void
880 tap_tx_queue_release(void *queue)
881 {
882         struct tx_queue *txq = queue;
883
884         if (txq && (txq->fd > 0)) {
885                 close(txq->fd);
886                 txq->fd = -1;
887         }
888 }
889
890 static int
891 tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
892 {
893         struct rte_eth_link *dev_link = &dev->data->dev_link;
894         struct pmd_internals *pmd = dev->data->dev_private;
895         struct ifreq ifr = { .ifr_flags = 0 };
896
897         if (pmd->remote_if_index) {
898                 tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
899                 if (!(ifr.ifr_flags & IFF_UP) ||
900                     !(ifr.ifr_flags & IFF_RUNNING)) {
901                         dev_link->link_status = ETH_LINK_DOWN;
902                         return 0;
903                 }
904         }
905         tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
906         dev_link->link_status =
907                 ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
908                  ETH_LINK_UP :
909                  ETH_LINK_DOWN);
910         return 0;
911 }
912
913 static void
914 tap_promisc_enable(struct rte_eth_dev *dev)
915 {
916         struct pmd_internals *pmd = dev->data->dev_private;
917         struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
918
919         dev->data->promiscuous = 1;
920         tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
921         if (pmd->remote_if_index && !pmd->flow_isolate)
922                 tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
923 }
924
925 static void
926 tap_promisc_disable(struct rte_eth_dev *dev)
927 {
928         struct pmd_internals *pmd = dev->data->dev_private;
929         struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
930
931         dev->data->promiscuous = 0;
932         tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
933         if (pmd->remote_if_index && !pmd->flow_isolate)
934                 tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
935 }
936
937 static void
938 tap_allmulti_enable(struct rte_eth_dev *dev)
939 {
940         struct pmd_internals *pmd = dev->data->dev_private;
941         struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
942
943         dev->data->all_multicast = 1;
944         tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
945         if (pmd->remote_if_index && !pmd->flow_isolate)
946                 tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
947 }
948
949 static void
950 tap_allmulti_disable(struct rte_eth_dev *dev)
951 {
952         struct pmd_internals *pmd = dev->data->dev_private;
953         struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
954
955         dev->data->all_multicast = 0;
956         tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
957         if (pmd->remote_if_index && !pmd->flow_isolate)
958                 tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
959 }
960
961 static int
962 tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
963 {
964         struct pmd_internals *pmd = dev->data->dev_private;
965         enum ioctl_mode mode = LOCAL_ONLY;
966         struct ifreq ifr;
967         int ret;
968
969         if (is_zero_ether_addr(mac_addr)) {
970                 RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
971                         dev->device->name);
972                 return -EINVAL;
973         }
974         /* Check the actual current MAC address on the tap netdevice */
975         ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
976         if (ret < 0)
977                 return ret;
978         if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
979                                mac_addr))
980                 return 0;
981         /* Check the current MAC address on the remote */
982         ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
983         if (ret < 0)
984                 return ret;
985         if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
986                                mac_addr))
987                 mode = LOCAL_AND_REMOTE;
988         ifr.ifr_hwaddr.sa_family = AF_LOCAL;
989         rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
990         ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
991         if (ret < 0)
992                 return ret;
993         rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
994         if (pmd->remote_if_index && !pmd->flow_isolate) {
995                 /* Replace MAC redirection rule after a MAC change */
996                 ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
997                 if (ret < 0) {
998                         RTE_LOG(ERR, PMD,
999                                 "%s: Couldn't delete MAC redirection rule\n",
1000                                 dev->device->name);
1001                         return ret;
1002                 }
1003                 ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
1004                 if (ret < 0) {
1005                         RTE_LOG(ERR, PMD,
1006                                 "%s: Couldn't add MAC redirection rule\n",
1007                                 dev->device->name);
1008                         return ret;
1009                 }
1010         }
1011
1012         return 0;
1013 }
1014
1015 static int
1016 tap_setup_queue(struct rte_eth_dev *dev,
1017                 struct pmd_internals *internals,
1018                 uint16_t qid,
1019                 int is_rx)
1020 {
1021         int *fd;
1022         int *other_fd;
1023         const char *dir;
1024         struct pmd_internals *pmd = dev->data->dev_private;
1025         struct rx_queue *rx = &internals->rxq[qid];
1026         struct tx_queue *tx = &internals->txq[qid];
1027
1028         if (is_rx) {
1029                 fd = &rx->fd;
1030                 other_fd = &tx->fd;
1031                 dir = "rx";
1032         } else {
1033                 fd = &tx->fd;
1034                 other_fd = &rx->fd;
1035                 dir = "tx";
1036         }
1037         if (*fd != -1) {
1038                 /* fd for this queue already exists */
1039                 RTE_LOG(DEBUG, PMD, "%s: fd %d for %s queue qid %d exists\n",
1040                         pmd->name, *fd, dir, qid);
1041         } else if (*other_fd != -1) {
1042                 /* Only other_fd exists. dup it */
1043                 *fd = dup(*other_fd);
1044                 if (*fd < 0) {
1045                         *fd = -1;
1046                         RTE_LOG(ERR, PMD, "%s: dup() failed.\n",
1047                                 pmd->name);
1048                         return -1;
1049                 }
1050                 RTE_LOG(DEBUG, PMD, "%s: dup fd %d for %s queue qid %d (%d)\n",
1051                         pmd->name, *other_fd, dir, qid, *fd);
1052         } else {
1053                 /* Both RX and TX fds do not exist (equal -1). Create fd */
1054                 *fd = tun_alloc(pmd);
1055                 if (*fd < 0) {
1056                         *fd = -1; /* restore original value */
1057                         RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n",
1058                                 pmd->name);
1059                         return -1;
1060                 }
1061                 RTE_LOG(DEBUG, PMD, "%s: add %s queue for qid %d fd %d\n",
1062                         pmd->name, dir, qid, *fd);
1063         }
1064
1065         tx->mtu = &dev->data->mtu;
1066         rx->rxmode = &dev->data->dev_conf.rxmode;
1067
1068         return *fd;
1069 }
1070
1071 static int
1072 tap_rx_queue_setup(struct rte_eth_dev *dev,
1073                    uint16_t rx_queue_id,
1074                    uint16_t nb_rx_desc,
1075                    unsigned int socket_id,
1076                    const struct rte_eth_rxconf *rx_conf __rte_unused,
1077                    struct rte_mempool *mp)
1078 {
1079         struct pmd_internals *internals = dev->data->dev_private;
1080         struct rx_queue *rxq = &internals->rxq[rx_queue_id];
1081         struct rte_mbuf **tmp = &rxq->pool;
1082         long iov_max = sysconf(_SC_IOV_MAX);
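        /*
         * readv() takes at most IOV_MAX iovecs and slot 0 is reserved for the
         * tun_pi header, so cap the descriptor count at IOV_MAX - 1.
         */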
1083         uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
1084         struct iovec (*iovecs)[nb_desc + 1];
1085         int data_off = RTE_PKTMBUF_HEADROOM;
1086         int ret = 0;
1087         int fd;
1088         int i;
1089
1090         if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
1091                 RTE_LOG(WARNING, PMD,
1092                         "nb_rx_queues %d too small or mempool NULL\n",
1093                         dev->data->nb_rx_queues);
1094                 return -1;
1095         }
1096
1097         /* Verify application offloads are valid for our port and queue. */
1098         if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
1099                 rte_errno = ENOTSUP;
1100                 RTE_LOG(ERR, PMD,
1101                         "%p: Rx queue offloads 0x%" PRIx64
1102                         " don't match port offloads 0x%" PRIx64
1103                         " or supported offloads 0x%" PRIx64 "\n",
1104                         (void *)dev, rx_conf->offloads,
1105                         dev->data->dev_conf.rxmode.offloads,
1106                         (tap_rx_offload_get_port_capa() |
1107                          tap_rx_offload_get_queue_capa()));
1108                 return -rte_errno;
1109         }
1110         rxq->mp = mp;
1111         rxq->trigger_seen = 1; /* force initial burst */
1112         rxq->in_port = dev->data->port_id;
1113         rxq->nb_rx_desc = nb_desc;
1114         iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
1115                                     socket_id);
1116         if (!iovecs) {
1117                 RTE_LOG(WARNING, PMD,
1118                         "%s: Couldn't allocate %d RX descriptors\n",
1119                         dev->device->name, nb_desc);
1120                 return -ENOMEM;
1121         }
1122         rxq->iovecs = iovecs;
1123
1124         dev->data->rx_queues[rx_queue_id] = rxq;
1125         fd = tap_setup_queue(dev, internals, rx_queue_id, 1);
1126         if (fd == -1) {
1127                 ret = fd;
1128                 goto error;
1129         }
1130
1131         (*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
1132         (*rxq->iovecs)[0].iov_base = &rxq->pi;
1133
1134         for (i = 1; i <= nb_desc; i++) {
1135                 *tmp = rte_pktmbuf_alloc(rxq->mp);
1136                 if (!*tmp) {
1137                         RTE_LOG(WARNING, PMD,
1138                                 "%s: couldn't allocate memory for queue %d\n",
1139                                 dev->device->name, rx_queue_id);
1140                         ret = -ENOMEM;
1141                         goto error;
1142                 }
1143                 (*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
1144                 (*rxq->iovecs)[i].iov_base =
1145                         (char *)(*tmp)->buf_addr + data_off;
1146                 data_off = 0;
1147                 tmp = &(*tmp)->next;
1148         }
1149
1150         RTE_LOG(DEBUG, PMD, "  RX TUNTAP device name %s, qid %d on fd %d\n",
1151                 internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);
1152
1153         return 0;
1154
1155 error:
1156         rte_pktmbuf_free(rxq->pool);
1157         rxq->pool = NULL;
1158         rte_free(rxq->iovecs);
1159         rxq->iovecs = NULL;
1160         return ret;
1161 }
1162
1163 static int
1164 tap_tx_queue_setup(struct rte_eth_dev *dev,
1165                    uint16_t tx_queue_id,
1166                    uint16_t nb_tx_desc __rte_unused,
1167                    unsigned int socket_id __rte_unused,
1168                    const struct rte_eth_txconf *tx_conf)
1169 {
1170         struct pmd_internals *internals = dev->data->dev_private;
1171         struct tx_queue *txq;
1172         int ret;
1173
1174         if (tx_queue_id >= dev->data->nb_tx_queues)
1175                 return -1;
1176         dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
1177         txq = dev->data->tx_queues[tx_queue_id];
1178         /*
1179          * Don't verify port offloads for applications which
1180          * use the old API.
1181          */
1182         if (tx_conf != NULL &&
1183             !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
1184                 if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
1185                         txq->csum = !!(tx_conf->offloads &
1186                                         (DEV_TX_OFFLOAD_IPV4_CKSUM |
1187                                          DEV_TX_OFFLOAD_UDP_CKSUM |
1188                                          DEV_TX_OFFLOAD_TCP_CKSUM));
1189                 } else {
1190                         rte_errno = ENOTSUP;
1191                         RTE_LOG(ERR, PMD,
1192                                 "%p: Tx queue offloads 0x%" PRIx64
1193                                 " don't match port offloads 0x%" PRIx64
1194                                 " or supported offloads 0x%" PRIx64,
1195                                 (void *)dev, tx_conf->offloads,
1196                                 dev->data->dev_conf.txmode.offloads,
1197                                 tap_tx_offload_get_port_capa());
1198                         return -rte_errno;
1199                 }
1200         }
1201         ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
1202         if (ret == -1)
1203                 return -1;
1204         RTE_LOG(DEBUG, PMD,
1205                 "  TX TUNTAP device name %s, qid %d on fd %d csum %s\n",
1206                 internals->name, tx_queue_id, internals->txq[tx_queue_id].fd,
1207                 txq->csum ? "on" : "off");
1208
1209         return 0;
1210 }
1211
1212 static int
1213 tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1214 {
1215         struct pmd_internals *pmd = dev->data->dev_private;
1216         struct ifreq ifr = { .ifr_mtu = mtu };
1217         int err = 0;
1218
1219         err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
1220         if (!err)
1221                 dev->data->mtu = mtu;
1222
1223         return err;
1224 }
1225
1226 static int
1227 tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
1228                      struct ether_addr *mc_addr_set __rte_unused,
1229                      uint32_t nb_mc_addr __rte_unused)
1230 {
1231         /*
1232          * Nothing to do actually: the tap has no filtering whatsoever, every
1233          * packet is received.
1234          */
1235         return 0;
1236 }
1237
1238 static int
1239 tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
1240 {
1241         struct rte_eth_dev *dev = arg;
1242         struct pmd_internals *pmd = dev->data->dev_private;
1243         struct ifinfomsg *info = NLMSG_DATA(nh);
1244
1245         if (nh->nlmsg_type != RTM_NEWLINK ||
1246             (info->ifi_index != pmd->if_index &&
1247              info->ifi_index != pmd->remote_if_index))
1248                 return 0;
1249         return tap_link_update(dev, 0);
1250 }
1251
1252 static void
1253 tap_dev_intr_handler(void *cb_arg)
1254 {
1255         struct rte_eth_dev *dev = cb_arg;
1256         struct pmd_internals *pmd = dev->data->dev_private;
1257
1258         tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
1259 }
1260
1261 static int
1262 tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
1263 {
1264         struct pmd_internals *pmd = dev->data->dev_private;
1265
1266         /* In any case, disable interrupt if the conf is no longer there. */
1267         if (!dev->data->dev_conf.intr_conf.lsc) {
1268                 if (pmd->intr_handle.fd != -1) {
1269                         tap_nl_final(pmd->intr_handle.fd);
1270                         rte_intr_callback_unregister(&pmd->intr_handle,
1271                                 tap_dev_intr_handler, dev);
1272                 }
1273                 return 0;
1274         }
1275         if (set) {
1276                 pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK);
1277                 if (unlikely(pmd->intr_handle.fd == -1))
1278                         return -EBADF;
1279                 return rte_intr_callback_register(
1280                         &pmd->intr_handle, tap_dev_intr_handler, dev);
1281         }
1282         tap_nl_final(pmd->intr_handle.fd);
1283         return rte_intr_callback_unregister(&pmd->intr_handle,
1284                                             tap_dev_intr_handler, dev);
1285 }
1286
1287 static int
1288 tap_intr_handle_set(struct rte_eth_dev *dev, int set)
1289 {
1290         int err;
1291
1292         err = tap_lsc_intr_handle_set(dev, set);
1293         if (err)
1294                 return err;
1295         err = tap_rx_intr_vec_set(dev, set);
1296         if (err && set)
1297                 tap_lsc_intr_handle_set(dev, 0);
1298         return err;
1299 }
1300
1301 static const uint32_t*
1302 tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1303 {
1304         static const uint32_t ptypes[] = {
1305                 RTE_PTYPE_INNER_L2_ETHER,
1306                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1307                 RTE_PTYPE_INNER_L2_ETHER_QINQ,
1308                 RTE_PTYPE_INNER_L3_IPV4,
1309                 RTE_PTYPE_INNER_L3_IPV4_EXT,
1310                 RTE_PTYPE_INNER_L3_IPV6,
1311                 RTE_PTYPE_INNER_L3_IPV6_EXT,
1312                 RTE_PTYPE_INNER_L4_FRAG,
1313                 RTE_PTYPE_INNER_L4_UDP,
1314                 RTE_PTYPE_INNER_L4_TCP,
1315                 RTE_PTYPE_INNER_L4_SCTP,
1316                 RTE_PTYPE_L2_ETHER,
1317                 RTE_PTYPE_L2_ETHER_VLAN,
1318                 RTE_PTYPE_L2_ETHER_QINQ,
1319                 RTE_PTYPE_L3_IPV4,
1320                 RTE_PTYPE_L3_IPV4_EXT,
1321                 RTE_PTYPE_L3_IPV6_EXT,
1322                 RTE_PTYPE_L3_IPV6,
1323                 RTE_PTYPE_L4_FRAG,
1324                 RTE_PTYPE_L4_UDP,
1325                 RTE_PTYPE_L4_TCP,
1326                 RTE_PTYPE_L4_SCTP,
1327         };
1328
1329         return ptypes;
1330 }
1331
1332 static int
1333 tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
1334                   struct rte_eth_fc_conf *fc_conf)
1335 {
1336         fc_conf->mode = RTE_FC_NONE;
1337         return 0;
1338 }
1339
1340 static int
1341 tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
1342                   struct rte_eth_fc_conf *fc_conf)
1343 {
1344         if (fc_conf->mode != RTE_FC_NONE)
1345                 return -ENOTSUP;
1346         return 0;
1347 }
1348
1349 static const struct eth_dev_ops ops = {
1350         .dev_start              = tap_dev_start,
1351         .dev_stop               = tap_dev_stop,
1352         .dev_close              = tap_dev_close,
1353         .dev_configure          = tap_dev_configure,
1354         .dev_infos_get          = tap_dev_info,
1355         .rx_queue_setup         = tap_rx_queue_setup,
1356         .tx_queue_setup         = tap_tx_queue_setup,
1357         .rx_queue_release       = tap_rx_queue_release,
1358         .tx_queue_release       = tap_tx_queue_release,
1359         .flow_ctrl_get          = tap_flow_ctrl_get,
1360         .flow_ctrl_set          = tap_flow_ctrl_set,
1361         .link_update            = tap_link_update,
1362         .dev_set_link_up        = tap_link_set_up,
1363         .dev_set_link_down      = tap_link_set_down,
1364         .promiscuous_enable     = tap_promisc_enable,
1365         .promiscuous_disable    = tap_promisc_disable,
1366         .allmulticast_enable    = tap_allmulti_enable,
1367         .allmulticast_disable   = tap_allmulti_disable,
1368         .mac_addr_set           = tap_mac_set,
1369         .mtu_set                = tap_mtu_set,
1370         .set_mc_addr_list       = tap_set_mc_addr_list,
1371         .stats_get              = tap_stats_get,
1372         .stats_reset            = tap_stats_reset,
1373         .dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
1374         .filter_ctrl            = tap_dev_filter_ctrl,
1375 };
1376
1377 static int
1378 eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
1379                    char *remote_iface, struct ether_addr *mac_addr)
1380 {
1381         int numa_node = rte_socket_id();
1382         struct rte_eth_dev *dev;
1383         struct pmd_internals *pmd;
1384         struct rte_eth_dev_data *data;
1385         struct ifreq ifr;
1386         int i;
1387
1388         RTE_LOG(DEBUG, PMD, "%s device on numa %u\n",
1389                         tuntap_name, rte_socket_id());
1390
1391         dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
1392         if (!dev) {
1393                 RTE_LOG(ERR, PMD, "%s Unable to allocate device struct\n",
1394                                 tuntap_name);
1395                 goto error_exit_nodev;
1396         }
1397
1398         pmd = dev->data->dev_private;
1399         pmd->dev = dev;
1400         snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
1401
1402         pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
1403         if (pmd->ioctl_sock == -1) {
1404                 RTE_LOG(ERR, PMD,
1405                         "%s Unable to get a socket for management: %s\n",
1406                         tuntap_name, strerror(errno));
1407                 goto error_exit;
1408         }
1409
1410         /* Setup some default values */
1411         data = dev->data;
1412         data->dev_private = pmd;
1413         data->dev_flags = RTE_ETH_DEV_INTR_LSC;
1414         data->numa_node = numa_node;
1415
1416         data->dev_link = pmd_link;
1417         data->mac_addrs = &pmd->eth_addr;
1418         /* Set the number of RX and TX queues */
1419         data->nb_rx_queues = 0;
1420         data->nb_tx_queues = 0;
1421
1422         dev->dev_ops = &ops;
1423         dev->rx_pkt_burst = pmd_rx_burst;
1424         dev->tx_pkt_burst = pmd_tx_burst;
1425
1426         pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
1427         pmd->intr_handle.fd = -1;
1428         dev->intr_handle = &pmd->intr_handle;
1429
1430         /* Pre-set the fds to -1 as not valid */
1431         for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
1432                 pmd->rxq[i].fd = -1;
1433                 pmd->txq[i].fd = -1;
1434         }
1435
1436         if (tap_type) {
1437                 if (is_zero_ether_addr(mac_addr))
1438                         eth_random_addr((uint8_t *)&pmd->eth_addr);
1439                 else
1440                         rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr));
1441         }
1442
1443         /* Immediately create the netdevice (this will create the 1st queue). */
1444         /* rx queue */
1445         if (tap_setup_queue(dev, pmd, 0, 1) == -1)
1446                 goto error_exit;
1447         /* tx queue */
1448         if (tap_setup_queue(dev, pmd, 0, 0) == -1)
1449                 goto error_exit;
1450
1451         ifr.ifr_mtu = dev->data->mtu;
1452         if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
1453                 goto error_exit;
1454
1455         if (tap_type) {
1456                 memset(&ifr, 0, sizeof(struct ifreq));
1457                 ifr.ifr_hwaddr.sa_family = AF_LOCAL;
1458                 rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
1459                                 ETHER_ADDR_LEN);
1460                 if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
1461                         goto error_exit;
1462         }
1463
1464         /*
1465          * Set up everything related to rte_flow:
1466          * - netlink socket
1467          * - tap / remote if_index
1468          * - mandatory QDISCs
1469          * - rte_flow actual/implicit lists
1470          * - implicit rules
1471          */
1472         pmd->nlsk_fd = tap_nl_init(0);
1473         if (pmd->nlsk_fd == -1) {
1474                 RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n",
1475                         pmd->name);
1476                 goto disable_rte_flow;
1477         }
1478         pmd->if_index = if_nametoindex(pmd->name);
1479         if (!pmd->if_index) {
1480                 RTE_LOG(ERR, PMD, "%s: failed to get if_index.\n", pmd->name);
1481                 goto disable_rte_flow;
1482         }
1483         if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
1484                 RTE_LOG(ERR, PMD, "%s: failed to create multiq qdisc.\n",
1485                         pmd->name);
1486                 goto disable_rte_flow;
1487         }
1488         if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
1489                 RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
1490                         pmd->name);
1491                 goto disable_rte_flow;
1492         }
1493         LIST_INIT(&pmd->flows);
1494
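        /*
         * Remote netdevice support: copy the remote interface MAC address to
         * the tap device and install implicit TC rules redirecting tap Tx to
         * the remote, and the remote's unicast (local MAC), broadcast and
         * IPv6 broadcast traffic to the tap queues.
         */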
1495         if (strlen(remote_iface)) {
1496                 pmd->remote_if_index = if_nametoindex(remote_iface);
1497                 if (!pmd->remote_if_index) {
1498                         RTE_LOG(ERR, PMD, "%s: failed to get %s if_index.\n",
1499                                 pmd->name, remote_iface);
1500                         goto error_remote;
1501                 }
1502                 snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
1503                          "%s", remote_iface);
1504
1505                 /* Save state of remote device */
1506                 tap_ioctl(pmd, SIOCGIFFLAGS, &pmd->remote_initial_flags, 0, REMOTE_ONLY);
1507
1508                 /* Replicate remote MAC address */
1509                 if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
1510                         RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n",
1511                                 pmd->name, pmd->remote_iface);
1512                         goto error_remote;
1513                 }
1514                 rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
1515                            ETHER_ADDR_LEN);
1516                 /* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
1517                 if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
1518                         RTE_LOG(ERR, PMD, "%s: failed to set %s MAC address.\n",
1519                                 pmd->name, remote_iface);
1520                         goto error_remote;
1521                 }
1522
1523                 /*
1524                  * Flush usually returns negative value because it tries to
1525                  * delete every QDISC (and on a running device, one QDISC at
1526                  * least is needed). Ignore negative return value.
1527                  */
1528                 qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
1529                 if (qdisc_create_ingress(pmd->nlsk_fd,
1530                                          pmd->remote_if_index) < 0) {
1531                         RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
1532                                 pmd->remote_iface);
1533                         goto error_remote;
1534                 }
1535                 LIST_INIT(&pmd->implicit_flows);
1536                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
1537                     tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
1538                     tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
1539                     tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
1540                         RTE_LOG(ERR, PMD,
1541                                 "%s: failed to create implicit rules.\n",
1542                                 pmd->name);
1543                         goto error_remote;
1544                 }
1545         }
1546
1547         return 0;
1548
1549 disable_rte_flow:
1550         RTE_LOG(ERR, PMD, " Disabling rte flow support: %s(%d)\n",
1551                 strerror(errno), errno);
1552         if (strlen(remote_iface)) {
1553                 RTE_LOG(ERR, PMD, "Remote feature requires flow support.\n");
1554                 goto error_exit;
1555         }
1556         return 0;
1557
1558 error_remote:
1559         RTE_LOG(ERR, PMD, " Can't set up remote feature: %s(%d)\n",
1560                 strerror(errno), errno);
1561         tap_flow_implicit_flush(pmd, NULL);
1562
1563 error_exit:
1564         if (pmd->ioctl_sock > 0)
1565                 close(pmd->ioctl_sock);
1566         rte_eth_dev_release_port(dev);
1567
1568 error_exit_nodev:
1569         RTE_LOG(ERR, PMD, "%s Unable to initialize %s\n",
1570                 tuntap_name, rte_vdev_device_name(vdev));
1571
1572         return -EINVAL;
1573 }
1574
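/* kvargs handler for the "iface" argument: copy the requested interface name,
 * or fall back to the default "dtap<unit>" name when no value is given.
 */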
1575 static int
1576 set_interface_name(const char *key __rte_unused,
1577                    const char *value,
1578                    void *extra_args)
1579 {
1580         char *name = (char *)extra_args;
1581
1582         if (value)
1583                 strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
1584         else
1585                 snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s%d",
1586                          DEFAULT_TAP_NAME, (tap_unit - 1));
1587
1588         return 0;
1589 }
1590
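/* kvargs handler for the "remote" argument: record the remote netdevice name. */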
1591 static int
1592 set_remote_iface(const char *key __rte_unused,
1593                  const char *value,
1594                  void *extra_args)
1595 {
1596         char *name = (char *)extra_args;
1597
1598         if (value)
1599                 strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
1600
1601         return 0;
1602 }
1603
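/*
 * Parse a MAC address of the form xx:xx:xx:xx:xx:xx into user_mac.
 * Returns the number of colon-separated byte groups parsed; the caller only
 * accepts a result of exactly 6.
 */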
1604 static int parse_user_mac(struct ether_addr *user_mac,
1605                 const char *value)
1606 {
1607         unsigned int index = 0;
1608         char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL;
1609
1610         if (user_mac == NULL || value == NULL)
1611                 return 0;
1612
1613         strlcpy(mac_temp, value, sizeof(mac_temp));
1614         mac_byte = strtok(mac_temp, ":");
1615
1616         while ((mac_byte != NULL) &&
1617                         (strlen(mac_byte) <= 2) &&
1618                         (strlen(mac_byte) == strspn(mac_byte,
1619                                         ETH_TAP_CMP_MAC_FMT))) {
                /*
                 * Never write past the 6-byte address; keep counting groups
                 * so that over-long input still fails the caller's check for
                 * exactly 6 parsed bytes.
                 */
                if (index < ETHER_ADDR_LEN)
                        user_mac->addr_bytes[index] = strtoul(mac_byte, NULL, 16);
                index++;
1621                 mac_byte = strtok(NULL, ":");
1622         }
1623
1624         return index;
1625 }
1626
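/* kvargs handler for the "mac" argument: either build the fixed address
 * derived from the ASCII string "dtap" plus an interface index, or parse a
 * user-supplied xx:xx:xx:xx:xx:xx address.
 */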
1627 static int
1628 set_mac_type(const char *key __rte_unused,
1629              const char *value,
1630              void *extra_args)
1631 {
1632         struct ether_addr *user_mac = extra_args;
1633
1634         if (!value)
1635                 return 0;
1636
1637         if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
1638                 static int iface_idx;
1639
1640                 /* fixed mac = 00:64:74:61:70:<iface_idx> */
1641                 memcpy((char *)user_mac->addr_bytes, "\0dtap", ETHER_ADDR_LEN);
1642                 user_mac->addr_bytes[ETHER_ADDR_LEN - 1] = iface_idx++ + '0';
1643                 goto success;
1644         }
1645
1646         if (parse_user_mac(user_mac, value) != 6)
1647                 goto error;
1648 success:
1649         RTE_LOG(DEBUG, PMD, "TAP user MAC param (%s)\n", value);
1650         return 0;
1651
1652 error:
1653         RTE_LOG(ERR, PMD, "TAP user MAC (%s) is not in format (%s|%s)\n",
1654                 value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT);
1655         return -1;
1656 }
1657
1658 /*
1659  * Probe a TUN interface device. The TUN PMD
1660  * 1) clears tap_type so tun_alloc() opens an IFF_TUN device,
1661  * 2) accepts only the "iface" argument for the interface name,
1662  * 3) reports a fixed 10G link speed since the interface is virtual.
1663  */
1664 static int
1665 rte_pmd_tun_probe(struct rte_vdev_device *dev)
1666 {
1667         const char *name, *params;
1668         int ret;
1669         struct rte_kvargs *kvlist = NULL;
1670         char tun_name[RTE_ETH_NAME_MAX_LEN];
1671         char remote_iface[RTE_ETH_NAME_MAX_LEN];
1672
1673         tap_type = 0;
1674         strcpy(tuntap_name, "TUN");
1675
1676         name = rte_vdev_device_name(dev);
1677         params = rte_vdev_device_args(dev);
1678         memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
        /* Default interface name; the "iface" argument below may override it. */
        snprintf(tun_name, sizeof(tun_name), "%s%d",
                 DEFAULT_TUN_NAME, tun_unit++);
1679
1680         if (params && (params[0] != '\0')) {
1681                 RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);
1682
1683                 kvlist = rte_kvargs_parse(params, valid_arguments);
1684                 if (kvlist) {
1685                         if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
1686                                 ret = rte_kvargs_process(kvlist,
1687                                         ETH_TAP_IFACE_ARG,
1688                                         &set_interface_name,
1689                                         tun_name);
1690
1691                                 if (ret == -1)
1692                                         goto leave;
1693                         }
1694                 }
1695         }
1696         pmd_link.link_speed = ETH_SPEED_NUM_10G;
1697
1698         RTE_LOG(NOTICE, PMD, "Initializing pmd_tun for %s as %s\n",
1699                 name, tun_name);
1700
1701         ret = eth_dev_tap_create(dev, tun_name, remote_iface, NULL);
1702
1703 leave:
1704         if (ret == -1) {
1705                 RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n",
1706                         name, tun_name);
1707                 tun_unit--; /* Restore the unit number */
1708         }
1709         rte_kvargs_free(kvlist);
1710
1711         return ret;
1712 }
1713
1714 /* Probe a TAP interface device: parse the "iface", "remote" and "mac"
1715  * arguments and create the ethdev (or attach in a secondary process). */
1716 static int
1717 rte_pmd_tap_probe(struct rte_vdev_device *dev)
1718 {
1719         const char *name, *params;
1720         int ret;
1721         struct rte_kvargs *kvlist = NULL;
1722         int speed;
1723         char tap_name[RTE_ETH_NAME_MAX_LEN];
1724         char remote_iface[RTE_ETH_NAME_MAX_LEN];
1725         struct ether_addr user_mac = { .addr_bytes = {0} };
1726         struct rte_eth_dev *eth_dev;
1727
1728         tap_type = 1;
1729         strcpy(tuntap_name, "TAP");
1730
1731         name = rte_vdev_device_name(dev);
1732         params = rte_vdev_device_args(dev);
1733
1734         if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
1735             strlen(params) == 0) {
1736                 eth_dev = rte_eth_dev_attach_secondary(name);
1737                 if (!eth_dev) {
1738                         RTE_LOG(ERR, PMD, "Failed to probe %s\n", name);
1739                         return -1;
1740                 }
1741                 /* TODO: request info from primary to set up Rx and Tx */
1742                 eth_dev->dev_ops = &ops;
1743                 return 0;
1744         }
1745
1746         speed = ETH_SPEED_NUM_10G;
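        /* Build the default "dtap<unit>" name; the "iface" argument below may
         * override it.
         */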
1747         snprintf(tap_name, sizeof(tap_name), "%s%d",
1748                  DEFAULT_TAP_NAME, tap_unit++);
1749         memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
1750
1751         if (params && (params[0] != '\0')) {
1752                 RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);
1753
1754                 kvlist = rte_kvargs_parse(params, valid_arguments);
1755                 if (kvlist) {
1756                         if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
1757                                 ret = rte_kvargs_process(kvlist,
1758                                                          ETH_TAP_IFACE_ARG,
1759                                                          &set_interface_name,
1760                                                          tap_name);
1761                                 if (ret == -1)
1762                                         goto leave;
1763                         }
1764
1765                         if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
1766                                 ret = rte_kvargs_process(kvlist,
1767                                                          ETH_TAP_REMOTE_ARG,
1768                                                          &set_remote_iface,
1769                                                          remote_iface);
1770                                 if (ret == -1)
1771                                         goto leave;
1772                         }
1773
1774                         if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
1775                                 ret = rte_kvargs_process(kvlist,
1776                                                          ETH_TAP_MAC_ARG,
1777                                                          &set_mac_type,
1778                                                          &user_mac);
1779                                 if (ret == -1)
1780                                         goto leave;
1781                         }
1782                 }
1783         }
1784         pmd_link.link_speed = speed;
1785
1786         RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
1787                 name, tap_name);
1788
1789         ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac);
1790
1791 leave:
1792         if (ret == -1) {
1793                 RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n",
1794                         name, tap_name);
1795                 tap_unit--;             /* Restore the unit number */
1796         }
1797         rte_kvargs_free(kvlist);
1798
1799         return ret;
1800 }
1801
1802 /* Detach a TUN/TAP device: flush flows, close every queue fd and the
1803  * ioctl socket, then release the ethdev port. */
1804 static int
1805 rte_pmd_tap_remove(struct rte_vdev_device *dev)
1806 {
1807         struct rte_eth_dev *eth_dev = NULL;
1808         struct pmd_internals *internals;
1809         int i;
1810
1811         RTE_LOG(DEBUG, PMD, "Closing TUN/TAP Ethernet device on numa %u\n",
1812                 rte_socket_id());
1813
1814         /* find the ethdev entry */
1815         eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1816         if (!eth_dev)
1817                 return 0;
1818
1819         internals = eth_dev->data->dev_private;
1820         if (internals->nlsk_fd) {
1821                 tap_flow_flush(eth_dev, NULL);
1822                 tap_flow_implicit_flush(internals, NULL);
1823                 tap_nl_final(internals->nlsk_fd);
1824         }
1825         for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
1826                 if (internals->rxq[i].fd != -1) {
1827                         close(internals->rxq[i].fd);
1828                         internals->rxq[i].fd = -1;
1829                 }
1830                 if (internals->txq[i].fd != -1) {
1831                         close(internals->txq[i].fd);
1832                         internals->txq[i].fd = -1;
1833                 }
1834         }
1835
1836         close(internals->ioctl_sock);
1837         rte_free(eth_dev->data->dev_private);
1838
1839         rte_eth_dev_release_port(eth_dev);
1840
1841         return 0;
1842 }
1843
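/* The TUN and TAP vdev drivers share the same remove handler; their probe
 * routines differ in device type, default naming and supported arguments.
 */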
1844 static struct rte_vdev_driver pmd_tun_drv = {
1845         .probe = rte_pmd_tun_probe,
1846         .remove = rte_pmd_tap_remove,
1847 };
1848
1849 static struct rte_vdev_driver pmd_tap_drv = {
1850         .probe = rte_pmd_tap_probe,
1851         .remove = rte_pmd_tap_remove,
1852 };
1853 RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
1854 RTE_PMD_REGISTER_VDEV(net_tun, pmd_tun_drv);
1855 RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
1856 RTE_PMD_REGISTER_PARAM_STRING(net_tun,
1857                               ETH_TAP_IFACE_ARG "=<string> ");
1858 RTE_PMD_REGISTER_PARAM_STRING(net_tap,
1859                               ETH_TAP_IFACE_ARG "=<string> "
1860                               ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
1861                               ETH_TAP_REMOTE_ARG "=<string>");
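/*
 * Illustrative usage (not part of the driver): a TAP port can be requested on
 * the EAL command line with, e.g.,
 *   --vdev=net_tap0,iface=dtap0,mac=fixed
 * and a TUN port with
 *   --vdev=net_tun0,iface=dtun0
 */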