diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 9acea83983..fd425726aa 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include 
@@ -36,10 +7,10 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -53,6 +24,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -60,7 +32,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
@@ -78,9 +49,6 @@
 #define ETH_TAP_MAC_ARG         "mac"
 #define ETH_TAP_MAC_FIXED       "fixed"
 
-#define FLOWER_KERNEL_VERSION KERNEL_VERSION(4, 2, 0)
-#define FLOWER_VLAN_KERNEL_VERSION KERNEL_VERSION(4, 9, 0)
-
 static struct rte_vdev_driver pmd_tap_drv;
 
 static const char *valid_arguments[] = {
@@ -99,7 +67,7 @@ static struct rte_eth_link pmd_link = {
 	.link_speed = ETH_SPEED_NUM_10G,
 	.link_duplex = ETH_LINK_FULL_DUPLEX,
 	.link_status = ETH_LINK_DOWN,
-	.link_autoneg = ETH_LINK_SPEED_AUTONEG
+	.link_autoneg = ETH_LINK_AUTONEG
 };
 
 static void
@@ -285,6 +253,43 @@ tap_verify_csum(struct rte_mbuf *mbuf)
 	}
 }
 
+static uint64_t
+tap_rx_offload_get_port_capa(void)
+{
+	/*
+	 * In order to support legacy apps,
+	 * report capabilities also as port capabilities.
+ */ + return DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; +} + +static uint64_t +tap_rx_offload_get_queue_capa(void) +{ + return DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; +} + +static bool +tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads) +{ + uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads; + uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa(); + uint64_t port_supp_offloads = tap_rx_offload_get_port_capa(); + + if ((offloads & (queue_supp_offloads | port_supp_offloads)) != + offloads) + return false; + if ((port_offloads ^ offloads) & port_supp_offloads) + return false; + return true; +} + /* Callback to handle the rx burst of packets to the correct interface and * file descriptor(s) in a multi-queue setup. */ @@ -309,8 +314,9 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) int len; len = readv(rxq->fd, *rxq->iovecs, - 1 + (rxq->rxmode->enable_scatter ? - rxq->nb_rx_desc : 1)); + 1 + + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ? + rxq->nb_rx_desc : 1)); if (len < (int)sizeof(struct tun_pi)) break; @@ -365,7 +371,7 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) seg->next = NULL; mbuf->packet_type = rte_net_get_ptype(mbuf, NULL, RTE_PTYPE_ALL_MASK); - if (rxq->rxmode->hw_ip_checksum) + if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) tap_verify_csum(mbuf); /* account for the receive frame */ @@ -379,6 +385,42 @@ end: return num_rx; } +static uint64_t +tap_tx_offload_get_port_capa(void) +{ + /* + * In order to support legacy apps, + * report capabilities also as port capabilities. + */ + return DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; +} + +static uint64_t +tap_tx_offload_get_queue_capa(void) +{ + return DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; +} + +static bool +tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads) +{ + uint64_t port_offloads = dev->data->dev_conf.txmode.offloads; + uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa(); + uint64_t port_supp_offloads = tap_tx_offload_get_port_capa(); + + if ((offloads & (queue_supp_offloads | port_supp_offloads)) != + offloads) + return false; + /* Verify we have no conflict with port offloads */ + if ((port_offloads ^ offloads) & port_supp_offloads) + return false; + return true; +} + static void tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len, unsigned int l3_len) @@ -465,9 +507,10 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) rte_pktmbuf_mtod(seg, void *); seg = seg->next; } - if (mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) || - (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM || - (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) { + if (txq->csum && + ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) || + (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM || + (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) { /* Support only packets with all data in the same seg */ if (mbuf->nb_segs > 1) break; @@ -603,8 +646,42 @@ tap_dev_stop(struct rte_eth_dev *dev) } static int -tap_dev_configure(struct rte_eth_dev *dev __rte_unused) +tap_dev_configure(struct rte_eth_dev *dev) { + uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa(); + uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; + + if ((tx_offloads & 
supp_tx_offloads) != tx_offloads) { + rte_errno = ENOTSUP; + RTE_LOG(ERR, PMD, + "Some Tx offloads are not supported " + "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n", + tx_offloads, supp_tx_offloads); + return -rte_errno; + } + if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) { + RTE_LOG(ERR, PMD, + "%s: number of rx queues %d exceeds max num of queues %d\n", + dev->device->name, + dev->data->nb_rx_queues, + RTE_PMD_TAP_MAX_QUEUES); + return -1; + } + if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) { + RTE_LOG(ERR, PMD, + "%s: number of tx queues %d exceeds max num of queues %d\n", + dev->device->name, + dev->data->nb_tx_queues, + RTE_PMD_TAP_MAX_QUEUES); + return -1; + } + + RTE_LOG(INFO, PMD, "%s: %p: TX configured queues number: %u\n", + dev->device->name, (void *)dev, dev->data->nb_tx_queues); + + RTE_LOG(INFO, PMD, "%s: %p: RX configured queues number: %u\n", + dev->device->name, (void *)dev, dev->data->nb_rx_queues); + return 0; } @@ -650,21 +727,20 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->if_index = internals->if_index; dev_info->max_mac_addrs = 1; dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN; - dev_info->max_rx_queues = internals->nb_queues; - dev_info->max_tx_queues = internals->nb_queues; + dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES; + dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES; dev_info->min_rx_bufsize = 0; dev_info->pci_dev = NULL; dev_info->speed_capa = tap_dev_speed_capa(); - dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM); - dev_info->tx_offload_capa = - (DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM); + dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa(); + dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa(); + dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() | + dev_info->tx_queue_offload_capa; } -static void +static int tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats) { unsigned int i, imax; @@ -673,9 +749,9 @@ tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats) unsigned long rx_nombuf = 0, ierrors = 0; const struct pmd_internals *pmd = dev->data->dev_private; - imax = (pmd->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? - pmd->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS; - + /* rx queue statistics */ + imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? + dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS; for (i = 0; i < imax; i++) { tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets; tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes; @@ -683,7 +759,13 @@ tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats) rx_bytes_total += tap_stats->q_ibytes[i]; rx_nombuf += pmd->rxq[i].stats.rx_nombuf; ierrors += pmd->rxq[i].stats.ierrors; + } + + /* tx queue statistics */ + imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? 
+ dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS; + for (i = 0; i < imax; i++) { tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets; tap_stats->q_errors[i] = pmd->txq[i].stats.errs; tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes; @@ -699,6 +781,7 @@ tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats) tap_stats->opackets = tx_total; tap_stats->oerrors = tx_err_total; tap_stats->obytes = tx_bytes_total; + return 0; } static void @@ -707,7 +790,7 @@ tap_stats_reset(struct rte_eth_dev *dev) int i; struct pmd_internals *pmd = dev->data->dev_private; - for (i = 0; i < pmd->nb_queues; i++) { + for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) { pmd->rxq[i].stats.ipackets = 0; pmd->rxq[i].stats.ibytes = 0; pmd->rxq[i].stats.ierrors = 0; @@ -729,11 +812,15 @@ tap_dev_close(struct rte_eth_dev *dev) tap_flow_flush(dev, NULL); tap_flow_implicit_flush(internals, NULL); - for (i = 0; i < internals->nb_queues; i++) { - if (internals->rxq[i].fd != -1) + for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) { + if (internals->rxq[i].fd != -1) { close(internals->rxq[i].fd); - internals->rxq[i].fd = -1; - internals->txq[i].fd = -1; + internals->rxq[i].fd = -1; + } + if (internals->txq[i].fd != -1) { + close(internals->txq[i].fd); + internals->txq[i].fd = -1; + } } if (internals->remote_if_index) { @@ -887,30 +974,57 @@ tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) static int tap_setup_queue(struct rte_eth_dev *dev, struct pmd_internals *internals, - uint16_t qid) + uint16_t qid, + int is_rx) { + int *fd; + int *other_fd; + const char *dir; struct pmd_internals *pmd = dev->data->dev_private; struct rx_queue *rx = &internals->rxq[qid]; struct tx_queue *tx = &internals->txq[qid]; - int fd = rx->fd == -1 ? tx->fd : rx->fd; - if (fd == -1) { - RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n", - pmd->name, qid); - fd = tun_alloc(pmd); - if (fd < 0) { + if (is_rx) { + fd = &rx->fd; + other_fd = &tx->fd; + dir = "rx"; + } else { + fd = &tx->fd; + other_fd = &rx->fd; + dir = "tx"; + } + if (*fd != -1) { + /* fd for this queue already exists */ + RTE_LOG(DEBUG, PMD, "%s: fd %d for %s queue qid %d exists\n", + pmd->name, *fd, dir, qid); + } else if (*other_fd != -1) { + /* Only other_fd exists. dup it */ + *fd = dup(*other_fd); + if (*fd < 0) { + *fd = -1; + RTE_LOG(ERR, PMD, "%s: dup() failed.\n", + pmd->name); + return -1; + } + RTE_LOG(DEBUG, PMD, "%s: dup fd %d for %s queue qid %d (%d)\n", + pmd->name, *other_fd, dir, qid, *fd); + } else { + /* Both RX and TX fds do not exist (equal -1). Create fd */ + *fd = tun_alloc(pmd); + if (*fd < 0) { + *fd = -1; /* restore original value */ RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n", pmd->name); return -1; } + RTE_LOG(DEBUG, PMD, "%s: add %s queue for qid %d fd %d\n", + pmd->name, dir, qid, *fd); } - rx->fd = fd; - tx->fd = fd; tx->mtu = &dev->data->mtu; rx->rxmode = &dev->data->dev_conf.rxmode; - return fd; + return *fd; } static int @@ -932,13 +1046,26 @@ tap_rx_queue_setup(struct rte_eth_dev *dev, int fd; int i; - if ((rx_queue_id >= internals->nb_queues) || !mp) { + if (rx_queue_id >= dev->data->nb_rx_queues || !mp) { RTE_LOG(WARNING, PMD, - "nb_queues %d too small or mempool NULL\n", - internals->nb_queues); + "nb_rx_queues %d too small or mempool NULL\n", + dev->data->nb_rx_queues); return -1; } + /* Verify application offloads are valid for our port and queue. 
*/ + if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) { + rte_errno = ENOTSUP; + RTE_LOG(ERR, PMD, + "%p: Rx queue offloads 0x%" PRIx64 + " don't match port offloads 0x%" PRIx64 + " or supported offloads 0x%" PRIx64 "\n", + (void *)dev, rx_conf->offloads, + dev->data->dev_conf.rxmode.offloads, + (tap_rx_offload_get_port_capa() | + tap_rx_offload_get_queue_capa())); + return -rte_errno; + } rxq->mp = mp; rxq->trigger_seen = 1; /* force initial burst */ rxq->in_port = dev->data->port_id; @@ -954,7 +1081,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev, rxq->iovecs = iovecs; dev->data->rx_queues[rx_queue_id] = rxq; - fd = tap_setup_queue(dev, internals, rx_queue_id); + fd = tap_setup_queue(dev, internals, rx_queue_id, 1); if (fd == -1) { ret = fd; goto error; @@ -997,21 +1124,46 @@ tap_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused, unsigned int socket_id __rte_unused, - const struct rte_eth_txconf *tx_conf __rte_unused) + const struct rte_eth_txconf *tx_conf) { struct pmd_internals *internals = dev->data->dev_private; + struct tx_queue *txq; int ret; - if (tx_queue_id >= internals->nb_queues) + if (tx_queue_id >= dev->data->nb_tx_queues) return -1; - dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id]; - ret = tap_setup_queue(dev, internals, tx_queue_id); + txq = dev->data->tx_queues[tx_queue_id]; + /* + * Don't verify port offloads for application which + * use the old API. + */ + if (tx_conf != NULL && + !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) { + if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) { + txq->csum = !!(tx_conf->offloads & + (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM)); + } else { + rte_errno = ENOTSUP; + RTE_LOG(ERR, PMD, + "%p: Tx queue offloads 0x%" PRIx64 + " don't match port offloads 0x%" PRIx64 + " or supported offloads 0x%" PRIx64, + (void *)dev, tx_conf->offloads, + dev->data->dev_conf.txmode.offloads, + tap_tx_offload_get_port_capa()); + return -rte_errno; + } + } + ret = tap_setup_queue(dev, internals, tx_queue_id, 0); if (ret == -1) return -1; - - RTE_LOG(DEBUG, PMD, " TX TAP device name %s, qid %d on fd %d\n", - internals->name, tx_queue_id, internals->txq[tx_queue_id].fd); + RTE_LOG(DEBUG, PMD, + " TX TAP device name %s, qid %d on fd %d csum %s\n", + internals->name, tx_queue_id, internals->txq[tx_queue_id].fd, + txq->csum ? "on" : "off"); return 0; } @@ -1062,7 +1214,7 @@ tap_dev_intr_handler(void *cb_arg) struct rte_eth_dev *dev = cb_arg; struct pmd_internals *pmd = dev->data->dev_private; - nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev); + tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev); } static int @@ -1072,20 +1224,21 @@ tap_intr_handle_set(struct rte_eth_dev *dev, int set) /* In any case, disable interrupt if the conf is no longer there. 
*/ if (!dev->data->dev_conf.intr_conf.lsc) { - if (pmd->intr_handle.fd != -1) - nl_final(pmd->intr_handle.fd); - rte_intr_callback_unregister( - &pmd->intr_handle, tap_dev_intr_handler, dev); + if (pmd->intr_handle.fd != -1) { + tap_nl_final(pmd->intr_handle.fd); + rte_intr_callback_unregister(&pmd->intr_handle, + tap_dev_intr_handler, dev); + } return 0; } if (set) { - pmd->intr_handle.fd = nl_init(RTMGRP_LINK); + pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK); if (unlikely(pmd->intr_handle.fd == -1)) return -EBADF; return rte_intr_callback_register( &pmd->intr_handle, tap_dev_intr_handler, dev); } - nl_final(pmd->intr_handle.fd); + tap_nl_final(pmd->intr_handle.fd); return rte_intr_callback_unregister(&pmd->intr_handle, tap_dev_intr_handler, dev); } @@ -1166,7 +1319,6 @@ static const struct eth_dev_ops ops = { .filter_ctrl = tap_dev_filter_ctrl, }; - static int eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, char *remote_iface, int fixed_mac_type) @@ -1193,8 +1345,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, } pmd = dev->data->dev_private; + pmd->dev = dev; snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name); - pmd->nb_queues = RTE_PMD_TAP_MAX_QUEUES; pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0); if (pmd->ioctl_sock == -1) { @@ -1207,13 +1359,14 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, /* Setup some default values */ rte_memcpy(data, dev->data, sizeof(*data)); data->dev_private = pmd; - data->dev_flags = RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC; + data->dev_flags = RTE_ETH_DEV_INTR_LSC; data->numa_node = numa_node; data->dev_link = pmd_link; data->mac_addrs = &pmd->eth_addr; - data->nb_rx_queues = pmd->nb_queues; - data->nb_tx_queues = pmd->nb_queues; + /* Set the number of RX and TX queues */ + data->nb_rx_queues = 0; + data->nb_tx_queues = 0; dev->data = data; dev->dev_ops = &ops; @@ -1241,7 +1394,11 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, } /* Immediately create the netdevice (this will create the 1st queue). */ - if (tap_setup_queue(dev, pmd, 0) == -1) + /* rx queue */ + if (tap_setup_queue(dev, pmd, 0, 1) == -1) + goto error_exit; + /* tx queue */ + if (tap_setup_queue(dev, pmd, 0, 0) == -1) goto error_exit; ifr.ifr_mtu = dev->data->mtu; @@ -1262,7 +1419,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, * - rte_flow actual/implicit lists * - implicit rules */ - pmd->nlsk_fd = nl_init(0); + pmd->nlsk_fd = tap_nl_init(0); if (pmd->nlsk_fd == -1) { RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n", pmd->name); @@ -1513,11 +1670,18 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev) if (internals->nlsk_fd) { tap_flow_flush(eth_dev, NULL); tap_flow_implicit_flush(internals, NULL); - nl_final(internals->nlsk_fd); + tap_nl_final(internals->nlsk_fd); } - for (i = 0; i < internals->nb_queues; i++) - if (internals->rxq[i].fd != -1) + for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) { + if (internals->rxq[i].fd != -1) { close(internals->rxq[i].fd); + internals->rxq[i].fd = -1; + } + if (internals->txq[i].fd != -1) { + close(internals->txq[i].fd); + internals->txq[i].fd = -1; + } + } close(internals->ioctl_sock); rte_free(eth_dev->data->dev_private);
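
Taken together, the hunks above convert the TAP PMD from the legacy offload bitfields (rxmode.hw_ip_checksum, txq_flags) to the per-port and per-queue offload API: capabilities are reported through tap_rx_offload_get_port_capa()/tap_rx_offload_get_queue_capa() and their Tx counterparts, and queue setup validates the requested flags with tap_rxq_are_offloads_valid() and tap_txq_are_offloads_valid(). Below is a minimal sketch of how an application might drive this API against the patched driver, assuming DPDK 17.11/18.02-era ethdev headers; the function name configure_tap_port, the single-queue layout, and the 128-descriptor ring size are illustrative assumptions, not part of the patch:

    #include <rte_ethdev.h>
    #include <rte_lcore.h>

    /* Offloads requested at the port level. The queue setup below repeats
     * the same set, which the tap_*_are_offloads_valid() helpers require
     * for any capability reported at port level. */
    #define TAP_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCATTER)
    #define TAP_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
                             DEV_TX_OFFLOAD_UDP_CKSUM | \
                             DEV_TX_OFFLOAD_TCP_CKSUM)

    static int
    configure_tap_port(uint16_t port_id, struct rte_mempool *mp)
    {
            struct rte_eth_conf conf = {
                    .rxmode = {
                            .offloads = TAP_RX_OFFLOADS,
                            /* opt in to the new offload semantics */
                            .ignore_offload_bitfield = 1,
                    },
                    .txmode = { .offloads = TAP_TX_OFFLOADS },
            };
            struct rte_eth_rxconf rxconf = { .offloads = TAP_RX_OFFLOADS };
            struct rte_eth_txconf txconf = {
                    /* ETH_TXQ_FLAGS_IGNORE makes tap_tx_queue_setup() read
                     * tx_conf->offloads instead of the legacy txq_flags */
                    .txq_flags = ETH_TXQ_FLAGS_IGNORE,
                    .offloads = TAP_TX_OFFLOADS,
            };
            int ret;

            ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
            if (ret < 0)
                    return ret;
            ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
                                         &rxconf, mp);
            if (ret < 0)
                    return ret;
            return rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(),
                                          &txconf);
    }

Note that the queue-level offloads deliberately repeat the port-level set: the validation helpers reject any mismatch between queue and port offloads on port-reported capabilities, and since this driver reports the same set at both levels, a queue's offloads must effectively mirror the port configuration.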