/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <linux/if_ether.h>
/* Linux based path to the TUN device */
#define TUN_TAP_DEV_PATH	"/dev/net/tun"
#define DEFAULT_TAP_NAME	"dtap"

#define ETH_TAP_IFACE_ARG	"iface"
#define ETH_TAP_SPEED_ARG	"speed"
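
/*
 * Example devargs (hypothetical command line, for illustration only):
 *   --vdev=net_tap0,iface=tap0,speed=10000
 * creates a kernel TAP netdevice named "tap0" and advertises a 10 Gbps
 * link; without "iface" the name defaults to DEFAULT_TAP_NAME plus a unit
 * number ("dtap0", "dtap1", ...).
 */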
#ifdef IFF_MULTI_QUEUE
#define RTE_PMD_TAP_MAX_QUEUES	16
#else
#define RTE_PMD_TAP_MAX_QUEUES	1
#endif
static struct rte_vdev_driver pmd_tap_drv;

static const char *valid_arguments[] = {
	ETH_TAP_IFACE_ARG,
	ETH_TAP_SPEED_ARG,
	NULL
};

static int tap_unit;

static volatile uint32_t tap_trigger;	/* Rx trigger */

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG
};
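
/*
 * The port starts out reporting a 10G full-duplex link; only link_speed is
 * overridden later, from the "speed" devargs value parsed in
 * rte_pmd_tap_probe().
 */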
struct pkt_stats {
	uint64_t opackets;		/* Number of output packets */
	uint64_t ipackets;		/* Number of input packets */
	uint64_t obytes;		/* Number of bytes on output */
	uint64_t ibytes;		/* Number of bytes on input */
	uint64_t errs;			/* Number of error packets */
};

struct rx_queue {
	struct rte_mempool *mp;		/* Mempool for RX packets */
	uint32_t trigger_seen;		/* Last seen Rx trigger value */
	uint16_t in_port;		/* Port ID */
	int fd;				/* TAP fd for this queue */
	struct pkt_stats stats;		/* Stats for this RX queue */
};

struct tx_queue {
	int fd;				/* TAP fd for this queue */
	struct pkt_stats stats;		/* Stats for this TX queue */
};

struct pmd_internals {
	char name[RTE_ETH_NAME_MAX_LEN];	/* Internal Tap device name */
	uint16_t nb_queues;		/* Number of queues supported */
	struct ether_addr eth_addr;	/* Mac address of the device port */
	int if_index;			/* IF_INDEX for the port */
	struct rx_queue rxq[RTE_PMD_TAP_MAX_QUEUES];	/* List of RX queues */
	struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES];	/* List of TX queues */
};
static void
tap_trigger_cb(int sig __rte_unused)
{
	/* Valid trigger values are nonzero */
	tap_trigger = (tap_trigger + 1) | 0x80000000;
}
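
/*
 * Rx trigger overview: the SIGIO handler above bumps tap_trigger whenever
 * the kernel reports activity on a tap file descriptor (see the O_ASYNC /
 * F_SETOWN setup in tun_alloc() below).  pmd_rx_burst() compares
 * tap_trigger with the per-queue trigger_seen value and returns
 * immediately while nothing new has been signalled, so empty polling
 * avoids read() syscalls.
 */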
/* Tun/Tap allocation routine
 *
 * name is the name of the interface to use, unless NULL to take the
 * host-supplied name.
 */
static int
tun_alloc(struct pmd_internals *pmd, uint16_t qid)
{
	struct ifreq ifr;
#ifdef IFF_MULTI_QUEUE
	unsigned int features;
#endif
	int fd;

	memset(&ifr, 0, sizeof(struct ifreq));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);

	RTE_LOG(DEBUG, PMD, "ifr_name '%s'\n", ifr.ifr_name);

	fd = open(TUN_TAP_DEV_PATH, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, PMD, "Unable to create TAP interface\n");
		goto error;
	}

#ifdef IFF_MULTI_QUEUE
	/* Grab the TUN features to verify we can work multi-queue */
	if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
		RTE_LOG(ERR, PMD, "TAP unable to get TUN/TAP features\n");
		goto error;
	}
	RTE_LOG(DEBUG, PMD, " TAP Features %08x\n", features);

	if (features & IFF_MULTI_QUEUE) {
		RTE_LOG(DEBUG, PMD, " Multi-queue support for %d queues\n",
			RTE_PMD_TAP_MAX_QUEUES);
		ifr.ifr_flags |= IFF_MULTI_QUEUE;
	} else
#endif
	{
		ifr.ifr_flags |= IFF_ONE_QUEUE;
		RTE_LOG(DEBUG, PMD, " Single queue support only\n");
	}

	/* Set the TUN/TAP configuration and set the name if needed */
	if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
		RTE_LOG(WARNING, PMD,
			"Unable to set TUNSETIFF for %s\n",
			pmd->name);
		goto error;
	}

	/* Always set the file descriptor to non-blocking */
	if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
		RTE_LOG(WARNING, PMD,
			"Unable to set %s to nonblocking\n",
			pmd->name);
		perror("F_SETFL, NONBLOCK");
		goto error;
	}

	/* Set up trigger to optimize empty Rx bursts */
	errno = 0;
	do {
		struct sigaction sa;
		int flags = fcntl(fd, F_GETFL);

		if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
			break;
		if (sa.sa_handler != tap_trigger_cb) {
			/*
			 * Make sure SIGIO is not already taken. This is done
			 * as late as possible to leave the application a
			 * chance to set up its own signal handler first.
			 */
			if (sa.sa_handler != SIG_IGN &&
			    sa.sa_handler != SIG_DFL) {
				errno = EBUSY;
				break;
			}
			sa = (struct sigaction){
				.sa_flags = SA_RESTART,
				.sa_handler = tap_trigger_cb,
			};
			if (sigaction(SIGIO, &sa, NULL) == -1)
				break;
		}
		/* Enable SIGIO on file descriptor */
		fcntl(fd, F_SETFL, flags | O_ASYNC);
		fcntl(fd, F_SETOWN, getpid());
	} while (0);

	if (errno) {
		/* Disable trigger globally in case of error */
		tap_trigger = 0;
		RTE_LOG(WARNING, PMD, "Rx trigger disabled: %s\n",
			strerror(errno));
	}

	if (qid == 0) {
		if (ioctl(fd, SIOCGIFHWADDR, &ifr) == -1) {
			RTE_LOG(ERR, PMD, "ioctl failed (SIOCGIFHWADDR) (%s)\n",
				ifr.ifr_name);
			goto error;
		}
		rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);
	}

	return fd;

error:
	if (fd > 0)
		close(fd);
	return -1;
}
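
/*
 * The MAC address read back via SIOCGIFHWADDR above is the one the kernel
 * assigned to the tap netdevice; it is exported as the port's single MAC
 * address through data->mac_addrs in eth_dev_tap_create().
 */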
/* Callback to handle the rx burst of packets to the correct interface and
 * file descriptor(s) in a multi-queue setup.
 */
static uint16_t
pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	int len;
	struct rte_mbuf *mbuf;
	struct rx_queue *rxq = queue;
	uint16_t num_rx;
	unsigned long num_rx_bytes = 0;
	uint32_t trigger = tap_trigger;

	if (trigger == rxq->trigger_seen)
		return 0;
	if (trigger)
		rxq->trigger_seen = trigger;
	rte_compiler_barrier();
	for (num_rx = 0; num_rx < nb_pkts; ) {
		/* allocate the next mbuf */
		mbuf = rte_pktmbuf_alloc(rxq->mp);
		if (unlikely(!mbuf)) {
			RTE_LOG(WARNING, PMD, "TAP unable to allocate mbuf\n");
			break;
		}

		len = read(rxq->fd, rte_pktmbuf_mtod(mbuf, char *),
			   rte_pktmbuf_tailroom(mbuf));
		if (len <= 0) {
			rte_pktmbuf_free(mbuf);
			break;
		}

		mbuf->data_len = len;
		mbuf->pkt_len = len;
		mbuf->port = rxq->in_port;

		/* account for the receive frame */
		bufs[num_rx++] = mbuf;
		num_rx_bytes += mbuf->pkt_len;
	}
	rxq->stats.ipackets += num_rx;
	rxq->stats.ibytes += num_rx_bytes;

	return num_rx;
}
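
/*
 * Minimal polling sketch (illustration only, port/queue init elided; the
 * port_id value is hypothetical):
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *
 * Empty polls stay cheap: until a new SIGIO trigger is observed,
 * pmd_rx_burst() returns 0 without issuing any read() syscalls.
 */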
/* Callback to handle sending packets from the tap interface
 */
static uint16_t
pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	struct tx_queue *txq = queue;
	uint16_t num_tx = 0;
	unsigned long num_tx_bytes = 0;
	int i, n;

	if (unlikely(nb_pkts == 0))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		/* copy the tx frame data */
		mbuf = bufs[num_tx];
		n = write(txq->fd,
			  rte_pktmbuf_mtod(mbuf, void *),
			  rte_pktmbuf_pkt_len(mbuf));
		if (n <= 0)
			break;

		num_tx++;
		num_tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	txq->stats.opackets += num_tx;
	txq->stats.errs += nb_pkts - num_tx;
	txq->stats.obytes += num_tx_bytes;

	return num_tx;
}
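
/*
 * Tx contract note: as with any ethdev tx burst, the return value is the
 * number of frames actually handed to the kernel; mbufs beyond that count
 * remain owned by the caller, and this PMD additionally accounts them in
 * txq->stats.errs for the burst.
 */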
static int
tap_link_set_flags(struct pmd_internals *pmd, short flags, int add)
{
	struct ifreq ifr;
	int err, s;

	/*
	 * An AF_INET/DGRAM socket is needed for
	 * SIOCGIFFLAGS/SIOCSIFFLAGS, using fd won't work.
	 */
	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s < 0) {
		RTE_LOG(ERR, PMD,
			"Unable to get a socket to set flags: %s\n",
			strerror(errno));
		return -1;
	}
	memset(&ifr, 0, sizeof(ifr));
	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);
	err = ioctl(s, SIOCGIFFLAGS, &ifr);
	if (err < 0) {
		RTE_LOG(WARNING, PMD, "Unable to get %s device flags: %s\n",
			pmd->name, strerror(errno));
		goto error;
	}
	if (add)
		ifr.ifr_flags |= flags;
	else
		ifr.ifr_flags &= ~flags;
	err = ioctl(s, SIOCSIFFLAGS, &ifr);
	if (err < 0) {
		RTE_LOG(WARNING, PMD, "Unable to %s flags 0x%x: %s\n",
			add ? "set" : "unset", flags, strerror(errno));
		goto error;
	}
	close(s);
	return 0;

error:
	close(s);
	return -1;
}
static int
tap_link_set_down(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
	return tap_link_set_flags(pmd, IFF_UP | IFF_NOARP, 0);
}

static int
tap_link_set_up(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return tap_link_set_flags(pmd, IFF_UP | IFF_NOARP, 1);
}

static int
tap_dev_start(struct rte_eth_dev *dev)
{
	return tap_link_set_up(dev);
}

/* This function gets called when the current port gets stopped.
 */
static void
tap_dev_stop(struct rte_eth_dev *dev)
{
	tap_link_set_down(dev);
}

static int
tap_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static void
tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
	dev_info->max_rx_queues = internals->nb_queues;
	dev_info->max_tx_queues = internals->nb_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}
static void
tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
{
	unsigned int i, imax;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
	const struct pmd_internals *pmd = dev->data->dev_private;

	imax = (pmd->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
		pmd->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;

	for (i = 0; i < imax; i++) {
		tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
		tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
		rx_total += tap_stats->q_ipackets[i];
		rx_bytes_total += tap_stats->q_ibytes[i];

		tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
		tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
		tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
		tx_total += tap_stats->q_opackets[i];
		tx_err_total += tap_stats->q_errors[i];
		tx_bytes_total += tap_stats->q_obytes[i];
	}

	tap_stats->ipackets = rx_total;
	tap_stats->ibytes = rx_bytes_total;
	tap_stats->opackets = tx_total;
	tap_stats->oerrors = tx_err_total;
	tap_stats->obytes = tx_bytes_total;
}

static void
tap_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	struct pmd_internals *pmd = dev->data->dev_private;

	for (i = 0; i < pmd->nb_queues; i++) {
		pmd->rxq[i].stats.ipackets = 0;
		pmd->rxq[i].stats.ibytes = 0;

		pmd->txq[i].stats.opackets = 0;
		pmd->txq[i].stats.errs = 0;
		pmd->txq[i].stats.obytes = 0;
	}
}
static void
tap_dev_close(struct rte_eth_dev *dev)
{
	int i;
	struct pmd_internals *internals = dev->data->dev_private;

	tap_link_set_down(dev);

	for (i = 0; i < internals->nb_queues; i++) {
		if (internals->rxq[i].fd != -1)
			close(internals->rxq[i].fd);
		internals->rxq[i].fd = -1;
		internals->txq[i].fd = -1;
	}
}

static void
tap_rx_queue_release(void *queue)
{
	struct rx_queue *rxq = queue;

	if (rxq && (rxq->fd > 0)) {
		close(rxq->fd);
		rxq->fd = -1;
	}
}

static void
tap_tx_queue_release(void *queue)
{
	struct tx_queue *txq = queue;

	if (txq && (txq->fd > 0)) {
		close(txq->fd);
		txq->fd = -1;
	}
}

static int
tap_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static void
tap_promisc_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	dev->data->promiscuous = 1;
	tap_link_set_flags(pmd, IFF_PROMISC, 1);
}

static void
tap_promisc_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	dev->data->promiscuous = 0;
	tap_link_set_flags(pmd, IFF_PROMISC, 0);
}

static void
tap_allmulti_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	dev->data->all_multicast = 1;
	tap_link_set_flags(pmd, IFF_ALLMULTI, 1);
}

static void
tap_allmulti_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;

	dev->data->all_multicast = 0;
	tap_link_set_flags(pmd, IFF_ALLMULTI, 0);
}
static int
tap_setup_queue(struct rte_eth_dev *dev,
		struct pmd_internals *internals,
		uint16_t qid)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct rx_queue *rx = &internals->rxq[qid];
	struct tx_queue *tx = &internals->txq[qid];
	int fd;

	fd = rx->fd;
	if (fd < 0) {
		fd = tx->fd;
		if (fd < 0) {
			RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n",
				pmd->name, qid);
			fd = tun_alloc(pmd, qid);
			if (fd < 0) {
				RTE_LOG(ERR, PMD, "tun_alloc(%s, %d) failed\n",
					pmd->name, qid);
				return -1;
			}
		}
	}
	rx->fd = fd;
	tx->fd = fd;

	return fd;
}

static int
rx_setup_queue(struct rte_eth_dev *dev,
	       struct pmd_internals *internals,
	       uint16_t qid)
{
	dev->data->rx_queues[qid] = &internals->rxq[qid];

	return tap_setup_queue(dev, internals, qid);
}

static int
tx_setup_queue(struct rte_eth_dev *dev,
	       struct pmd_internals *internals,
	       uint16_t qid)
{
	dev->data->tx_queues[qid] = &internals->txq[qid];

	return tap_setup_queue(dev, internals, qid);
}

static int
tap_rx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t rx_queue_id,
		   uint16_t nb_rx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_rxconf *rx_conf __rte_unused,
		   struct rte_mempool *mp)
{
	struct pmd_internals *internals = dev->data->dev_private;
	uint16_t buf_size;
	int fd;

	if ((rx_queue_id >= internals->nb_queues) || !mp) {
		RTE_LOG(WARNING, PMD,
			"nb_queues %d too small or mempool NULL\n",
			internals->nb_queues);
		return -1;
	}

	internals->rxq[rx_queue_id].mp = mp;
	internals->rxq[rx_queue_id].trigger_seen = 1; /* force initial burst */
	internals->rxq[rx_queue_id].in_port = dev->data->port_id;

	/* Now get the space available for data in the mbuf */
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
			      RTE_PKTMBUF_HEADROOM);

	if (buf_size < ETH_FRAME_LEN) {
		RTE_LOG(WARNING, PMD,
			"%s: %d bytes will not fit in mbuf (%d bytes)\n",
			dev->data->name, ETH_FRAME_LEN, buf_size);
		return -ENOMEM;
	}

	fd = rx_setup_queue(dev, internals, rx_queue_id);
	if (fd == -1)
		return -1;

	RTE_LOG(DEBUG, PMD, " RX TAP device name %s, qid %d on fd %d\n",
		internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);

	return 0;
}
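
/*
 * Mempool sizing sketch (illustration only; the pool name and sizes are
 * hypothetical): the pool handed to rte_eth_rx_queue_setup() must leave at
 * least ETH_FRAME_LEN bytes of data room after RTE_PKTMBUF_HEADROOM, e.g.:
 *
 *	mp = rte_pktmbuf_pool_create("tap_rx_pool", 8192, 256, 0,
 *				     RTE_MBUF_DEFAULT_BUF_SIZE,
 *				     rte_socket_id());
 *	rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mp);
 */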
static int
tap_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t tx_queue_id,
		   uint16_t nb_tx_desc __rte_unused,
		   unsigned int socket_id __rte_unused,
		   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int ret;

	if (tx_queue_id >= internals->nb_queues)
		return -1;

	ret = tx_setup_queue(dev, internals, tx_queue_id);
	if (ret == -1)
		return -1;

	RTE_LOG(DEBUG, PMD, " TX TAP device name %s, qid %d on fd %d\n",
		internals->name, tx_queue_id, internals->txq[tx_queue_id].fd);

	return 0;
}
static const struct eth_dev_ops ops = {
	.dev_start = tap_dev_start,
	.dev_stop = tap_dev_stop,
	.dev_close = tap_dev_close,
	.dev_configure = tap_dev_configure,
	.dev_infos_get = tap_dev_info,
	.rx_queue_setup = tap_rx_queue_setup,
	.tx_queue_setup = tap_tx_queue_setup,
	.rx_queue_release = tap_rx_queue_release,
	.tx_queue_release = tap_tx_queue_release,
	.link_update = tap_link_update,
	.dev_set_link_up = tap_link_set_up,
	.dev_set_link_down = tap_link_set_down,
	.promiscuous_enable = tap_promisc_enable,
	.promiscuous_disable = tap_promisc_disable,
	.allmulticast_enable = tap_allmulti_enable,
	.allmulticast_disable = tap_allmulti_disable,
	.stats_get = tap_stats_get,
	.stats_reset = tap_stats_reset,
};
static int
eth_dev_tap_create(const char *name, char *tap_name)
{
	int numa_node = rte_socket_id();
	struct rte_eth_dev *dev = NULL;
	struct pmd_internals *pmd = NULL;
	struct rte_eth_dev_data *data = NULL;
	int i;

	RTE_LOG(DEBUG, PMD, " TAP device on numa %u\n", rte_socket_id());

	data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node);
	if (!data) {
		RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n");
		goto error_exit;
	}

	pmd = rte_zmalloc_socket(tap_name, sizeof(*pmd), 0, numa_node);
	if (!pmd) {
		RTE_LOG(ERR, PMD, "TAP Unable to allocate internal struct\n");
		goto error_exit;
	}

	/* name in allocation and data->name must be consistent */
	snprintf(data->name, sizeof(data->name), "%s", name);
	dev = rte_eth_dev_allocate(name);
	if (!dev) {
		RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n");
		goto error_exit;
	}

	snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);

	pmd->nb_queues = RTE_PMD_TAP_MAX_QUEUES;

	/* Setup some default values */
	data->dev_private = pmd;
	data->port_id = dev->data->port_id;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = pmd_tap_drv.driver.name;
	data->numa_node = numa_node;

	data->dev_link = pmd_link;
	data->mac_addrs = &pmd->eth_addr;
	data->nb_rx_queues = pmd->nb_queues;
	data->nb_tx_queues = pmd->nb_queues;

	dev->data = data;
	dev->dev_ops = &ops;
	dev->rx_pkt_burst = pmd_rx_burst;
	dev->tx_pkt_burst = pmd_tx_burst;

	/* Pre-set the fds to -1 as not yet valid */
	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
		pmd->rxq[i].fd = -1;
		pmd->txq[i].fd = -1;
	}

	return 0;

error_exit:
	RTE_LOG(DEBUG, PMD, "TAP Unable to initialize %s\n", name);

	rte_free(data);
	rte_free(pmd);

	rte_eth_dev_release_port(dev);

	return -EINVAL;
}
static int
set_interface_name(const char *key __rte_unused, const char *value,
		   void *extra_args)
{
	char *name = (char *)extra_args;

	if (value)
		snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s", value);
	else
		snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
			 DEFAULT_TAP_NAME, (tap_unit - 1));
	return 0;
}

static int
set_interface_speed(const char *key __rte_unused, const char *value,
		    void *extra_args)
{
	*(int *)extra_args = (value) ? atoi(value) : ETH_SPEED_NUM_10G;
	return 0;
}
/* Open a TAP interface device.
 */
static int
rte_pmd_tap_probe(const char *name, const char *params)
{
	int ret;
	struct rte_kvargs *kvlist = NULL;
	int speed;
	char tap_name[RTE_ETH_NAME_MAX_LEN];

	speed = ETH_SPEED_NUM_10G;
	snprintf(tap_name, sizeof(tap_name), "%s%d",
		 DEFAULT_TAP_NAME, tap_unit++);

	if (params && (params[0] != '\0')) {
		RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);

		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist) {
			if (rte_kvargs_count(kvlist, ETH_TAP_SPEED_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_SPEED_ARG,
							 &set_interface_speed,
							 &speed);
				if (ret == -1)
					goto leave;
			}

			if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_IFACE_ARG,
							 &set_interface_name,
							 tap_name);
				if (ret == -1)
					goto leave;
			}
		}
	}
	pmd_link.link_speed = speed;

	RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
		name, tap_name);

	ret = eth_dev_tap_create(name, tap_name);

leave:
	if (ret == -1) {
		RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n",
			name, tap_name);
		tap_unit--;	/* Restore the unit number */
	}
	rte_kvargs_free(kvlist);

	return ret;
}
/* Detach a TAP device.
 */
static int
rte_pmd_tap_remove(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals;
	int i;

	RTE_LOG(DEBUG, PMD, "Closing TUN/TAP Ethernet device on numa %u\n",
		rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (!eth_dev)
		return 0;

	internals = eth_dev->data->dev_private;
	for (i = 0; i < internals->nb_queues; i++)
		if (internals->rxq[i].fd != -1)
			close(internals->rxq[i].fd);

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_tap_drv = {
	.probe = rte_pmd_tap_probe,
	.remove = rte_pmd_tap_remove,
};

RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
RTE_PMD_REGISTER_PARAM_STRING(net_tap, "iface=<string>,speed=N");
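
/*
 * Hot-plug sketch (illustration only, error handling elided): since the
 * device is flagged RTE_ETH_DEV_DETACHABLE, an application built against
 * this DPDK generation can also create and destroy ports at runtime:
 *
 *	uint8_t port_id;
 *	char devname[RTE_ETH_NAME_MAX_LEN];
 *
 *	rte_eth_dev_attach("net_tap0,iface=tap0", &port_id);
 *	...
 *	rte_eth_dev_detach(port_id, devname);
 */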