X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ftap%2Frte_eth_tap.c;h=9d39384ac5cadd15977f8fa1a340cd68c43d993e;hb=cb7e68da630abe7bb7b69885cc2a1a8e6d705943;hp=206a0d6946f7787b8ed0b3f439c20aecd5b69c6f;hpb=050fe6e9ff970ff92d842912136be8f9f52e171f;p=dpdk.git

diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index 206a0d6946..9d39384ac5 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -1,46 +1,20 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #include #include +#include #include #include -#include +#include #include #include -#include +#include #include #include +#include +#include #include #include @@ -50,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -57,7 +32,6 @@ #include #include #include -#include #include #include @@ -70,18 +44,16 @@ #define DEFAULT_TAP_NAME "dtap" #define ETH_TAP_IFACE_ARG "iface" -#define ETH_TAP_SPEED_ARG "speed" #define ETH_TAP_REMOTE_ARG "remote" - -#define FLOWER_KERNEL_VERSION KERNEL_VERSION(4, 2, 0) -#define FLOWER_VLAN_KERNEL_VERSION KERNEL_VERSION(4, 9, 0) +#define ETH_TAP_MAC_ARG "mac" +#define ETH_TAP_MAC_FIXED "fixed" static struct rte_vdev_driver pmd_tap_drv; static const char *valid_arguments[] = { ETH_TAP_IFACE_ARG, - ETH_TAP_SPEED_ARG, ETH_TAP_REMOTE_ARG, + ETH_TAP_MAC_ARG, NULL }; @@ -93,7 +65,7 @@ static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_SPEED_AUTONEG + .link_autoneg = ETH_LINK_AUTONEG }; static void @@ -110,10 +82,6 @@ enum ioctl_mode { REMOTE_ONLY, }; -static int -tap_ioctl(struct pmd_internals *pmd, unsigned long request, - struct ifreq *ifr, int set, enum ioctl_mode mode); - static int tap_intr_handle_set(struct rte_eth_dev *dev, int set); /* Tun/Tap allocation routine @@ -122,7 +90,7 @@ static int tap_intr_handle_set(struct rte_eth_dev *dev, int set); * supplied name. */ static int -tun_alloc(struct pmd_internals *pmd, uint16_t qid) +tun_alloc(struct pmd_internals *pmd) { struct ifreq ifr; #ifdef IFF_MULTI_QUEUE @@ -143,7 +111,7 @@ tun_alloc(struct pmd_internals *pmd, uint16_t qid) fd = open(TUN_TAP_DEV_PATH, O_RDWR); if (fd < 0) { - RTE_LOG(ERR, PMD, "Unable to create TAP interface"); + RTE_LOG(ERR, PMD, "Unable to create TAP interface\n"); goto error; } @@ -221,75 +189,6 @@ tun_alloc(struct pmd_internals *pmd, uint16_t qid) strerror(errno)); } - if (qid == 0) { - struct ifreq ifr; - - /* - * pmd->eth_addr contains the desired MAC, either from remote - * or from a random assignment. Sync it with the tap netdevice. - */ - ifr.ifr_hwaddr.sa_family = AF_LOCAL; - rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, - ETHER_ADDR_LEN); - if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) - goto error; - - pmd->if_index = if_nametoindex(pmd->name); - if (!pmd->if_index) { - RTE_LOG(ERR, PMD, - "Could not find ifindex for %s: rte_flow won't be usable.\n", - pmd->name); - return fd; - } - if (!pmd->flower_support) - return fd; - if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) { - RTE_LOG(ERR, PMD, - "Could not create multiq qdisc for %s: rte_flow won't be usable.\n", - pmd->name); - return fd; - } - if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) { - RTE_LOG(ERR, PMD, - "Could not create multiq qdisc for %s: rte_flow won't be usable.\n", - pmd->name); - return fd; - } - if (pmd->remote_if_index) { - /* - * Flush usually returns negative value because it tries - * to delete every QDISC (and on a running device, one - * QDISC at least is needed). Ignore negative return - * value. 
- */ - qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index); - if (qdisc_create_ingress(pmd->nlsk_fd, - pmd->remote_if_index) < 0) - goto remote_fail; - LIST_INIT(&pmd->implicit_flows); - if (tap_flow_implicit_create( - pmd, TAP_REMOTE_LOCAL_MAC) < 0) - goto remote_fail; - if (tap_flow_implicit_create( - pmd, TAP_REMOTE_BROADCAST) < 0) - goto remote_fail; - if (tap_flow_implicit_create( - pmd, TAP_REMOTE_BROADCASTV6) < 0) - goto remote_fail; - if (tap_flow_implicit_create( - pmd, TAP_REMOTE_TX) < 0) - goto remote_fail; - } - } - - return fd; - -remote_fail: - RTE_LOG(ERR, PMD, - "Could not set up remote flow rules for %s: remote disabled.\n", - pmd->name); - pmd->remote_if_index = 0; - tap_flow_implicit_flush(pmd, NULL); return fd; error: @@ -298,6 +197,97 @@ error: return -1; } +static void +tap_verify_csum(struct rte_mbuf *mbuf) +{ + uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK; + uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK; + uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK; + unsigned int l2_len = sizeof(struct ether_hdr); + unsigned int l3_len; + uint16_t cksum = 0; + void *l3_hdr; + void *l4_hdr; + + if (l2 == RTE_PTYPE_L2_ETHER_VLAN) + l2_len += 4; + else if (l2 == RTE_PTYPE_L2_ETHER_QINQ) + l2_len += 8; + /* Don't verify checksum for packets with discontinuous L2 header */ + if (unlikely(l2_len + sizeof(struct ipv4_hdr) > + rte_pktmbuf_data_len(mbuf))) + return; + l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len); + if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) { + struct ipv4_hdr *iph = l3_hdr; + + /* ihl contains the number of 4-byte words in the header */ + l3_len = 4 * (iph->version_ihl & 0xf); + if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf))) + return; + + cksum = ~rte_raw_cksum(iph, l3_len); + mbuf->ol_flags |= cksum ? + PKT_RX_IP_CKSUM_BAD : + PKT_RX_IP_CKSUM_GOOD; + } else if (l3 == RTE_PTYPE_L3_IPV6) { + l3_len = sizeof(struct ipv6_hdr); + } else { + /* IPv6 extensions are not supported */ + return; + } + if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) { + l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len); + /* Don't verify checksum for multi-segment packets. */ + if (mbuf->nb_segs > 1) + return; + if (l3 == RTE_PTYPE_L3_IPV4) + cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr); + else if (l3 == RTE_PTYPE_L3_IPV6) + cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr); + mbuf->ol_flags |= cksum ? + PKT_RX_L4_CKSUM_BAD : + PKT_RX_L4_CKSUM_GOOD; + } +} + +static uint64_t +tap_rx_offload_get_port_capa(void) +{ + /* + * In order to support legacy apps, + * report capabilities also as port capabilities. + */ + return DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; +} + +static uint64_t +tap_rx_offload_get_queue_capa(void) +{ + return DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM; +} + +static bool +tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads) +{ + uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads; + uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa(); + uint64_t port_supp_offloads = tap_rx_offload_get_port_capa(); + + if ((offloads & (queue_supp_offloads | port_supp_offloads)) != + offloads) + return false; + if ((port_offloads ^ offloads) & port_supp_offloads) + return false; + return true; +} + /* Callback to handle the rx burst of packets to the correct interface and * file descriptor(s) in a multi-queue setup. 
*/ @@ -322,8 +312,9 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) int len; len = readv(rxq->fd, *rxq->iovecs, - 1 + (rxq->rxmode->enable_scatter ? - rxq->nb_rx_desc : 1)); + 1 + + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ? + rxq->nb_rx_desc : 1)); if (len < (int)sizeof(struct tun_pi)) break; @@ -378,6 +369,8 @@ pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) seg->next = NULL; mbuf->packet_type = rte_net_get_ptype(mbuf, NULL, RTE_PTYPE_ALL_MASK); + if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) + tap_verify_csum(mbuf); /* account for the receive frame */ bufs[num_rx++] = mbuf; @@ -390,6 +383,94 @@ end: return num_rx; } +static uint64_t +tap_tx_offload_get_port_capa(void) +{ + /* + * In order to support legacy apps, + * report capabilities also as port capabilities. + */ + return DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; +} + +static uint64_t +tap_tx_offload_get_queue_capa(void) +{ + return DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM; +} + +static bool +tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads) +{ + uint64_t port_offloads = dev->data->dev_conf.txmode.offloads; + uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa(); + uint64_t port_supp_offloads = tap_tx_offload_get_port_capa(); + + if ((offloads & (queue_supp_offloads | port_supp_offloads)) != + offloads) + return false; + /* Verify we have no conflict with port offloads */ + if ((port_offloads ^ offloads) & port_supp_offloads) + return false; + return true; +} + +static void +tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len, + unsigned int l3_len) +{ + void *l3_hdr = packet + l2_len; + + if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) { + struct ipv4_hdr *iph = l3_hdr; + uint16_t cksum; + + iph->hdr_checksum = 0; + cksum = rte_raw_cksum(iph, l3_len); + iph->hdr_checksum = (cksum == 0xffff) ? 
cksum : ~cksum; + } + if (ol_flags & PKT_TX_L4_MASK) { + uint16_t l4_len; + uint32_t cksum; + uint16_t *l4_cksum; + void *l4_hdr; + + l4_hdr = packet + l2_len + l3_len; + if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) + l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum; + else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) + l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum; + else + return; + *l4_cksum = 0; + if (ol_flags & PKT_TX_IPV4) { + struct ipv4_hdr *iph = l3_hdr; + + l4_len = rte_be_to_cpu_16(iph->total_length) - l3_len; + cksum = rte_ipv4_phdr_cksum(l3_hdr, 0); + } else { + struct ipv6_hdr *ip6h = l3_hdr; + + /* payload_len does not include ext headers */ + l4_len = rte_be_to_cpu_16(ip6h->payload_len) - + l3_len + sizeof(struct ipv6_hdr); + cksum = rte_ipv6_phdr_cksum(l3_hdr, 0); + } + cksum += rte_raw_cksum(l4_hdr, l4_len); + cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff); + cksum = (~cksum) & 0xffff; + if (cksum == 0) + cksum = 0xffff; + *l4_cksum = cksum; + } +} + /* Callback to handle sending packets from the tap interface */ static uint16_t @@ -410,6 +491,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) struct iovec iovecs[mbuf->nb_segs + 1]; struct tun_pi pi = { .flags = 0 }; struct rte_mbuf *seg = mbuf; + char m_copy[mbuf->data_len]; int n; int j; @@ -425,6 +507,20 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) rte_pktmbuf_mtod(seg, void *); seg = seg->next; } + if (txq->csum && + ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) || + (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM || + (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) { + /* Support only packets with all data in the same seg */ + if (mbuf->nb_segs > 1) + break; + /* To change checksums, work on a copy of data. 
*/ + rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *), + rte_pktmbuf_data_len(mbuf)); + tap_tx_offload(m_copy, mbuf->ol_flags, + mbuf->l2_len, mbuf->l3_len); + iovecs[1].iov_base = m_copy; + } /* copy the tx frame data */ n = writev(txq->fd, iovecs, mbuf->nb_segs + 1); if (n <= 0) @@ -442,6 +538,24 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) return num_tx; } +static const char * +tap_ioctl_req2str(unsigned long request) +{ + switch (request) { + case SIOCSIFFLAGS: + return "SIOCSIFFLAGS"; + case SIOCGIFFLAGS: + return "SIOCGIFFLAGS"; + case SIOCGIFHWADDR: + return "SIOCGIFHWADDR"; + case SIOCSIFHWADDR: + return "SIOCSIFHWADDR"; + case SIOCSIFMTU: + return "SIOCSIFMTU"; + } + return "UNKNOWN"; +} + static int tap_ioctl(struct pmd_internals *pmd, unsigned long request, struct ifreq *ifr, int set, enum ioctl_mode mode) @@ -477,9 +591,7 @@ apply: case SIOCSIFMTU: break; default: - RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n", - pmd->name); - return -EINVAL; + RTE_ASSERT(!"unsupported request type: must not happen"); } if (ioctl(pmd->ioctl_sock, request, ifr) < 0) goto error; @@ -488,8 +600,8 @@ apply: return 0; error: - RTE_LOG(ERR, PMD, "%s: ioctl(%lu) failed with error: %s\n", - ifr->ifr_name, request, strerror(errno)); + RTE_LOG(DEBUG, PMD, "%s: %s(%s) failed: %s(%d)\n", ifr->ifr_name, + __func__, tap_ioctl_req2str(request), strerror(errno), errno); return -errno; } @@ -500,7 +612,7 @@ tap_link_set_down(struct rte_eth_dev *dev) struct ifreq ifr = { .ifr_flags = IFF_UP }; dev->data->dev_link.link_status = ETH_LINK_DOWN; - return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE); + return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY); } static int @@ -534,8 +646,42 @@ tap_dev_stop(struct rte_eth_dev *dev) } static int -tap_dev_configure(struct rte_eth_dev *dev __rte_unused) +tap_dev_configure(struct rte_eth_dev *dev) { + uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa(); + uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; + + if ((tx_offloads & supp_tx_offloads) != tx_offloads) { + rte_errno = ENOTSUP; + RTE_LOG(ERR, PMD, + "Some Tx offloads are not supported " + "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n", + tx_offloads, supp_tx_offloads); + return -rte_errno; + } + if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) { + RTE_LOG(ERR, PMD, + "%s: number of rx queues %d exceeds max num of queues %d\n", + dev->device->name, + dev->data->nb_rx_queues, + RTE_PMD_TAP_MAX_QUEUES); + return -1; + } + if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) { + RTE_LOG(ERR, PMD, + "%s: number of tx queues %d exceeds max num of queues %d\n", + dev->device->name, + dev->data->nb_tx_queues, + RTE_PMD_TAP_MAX_QUEUES); + return -1; + } + + RTE_LOG(INFO, PMD, "%s: %p: TX configured queues number: %u\n", + dev->device->name, (void *)dev, dev->data->nb_tx_queues); + + RTE_LOG(INFO, PMD, "%s: %p: RX configured queues number: %u\n", + dev->device->name, (void *)dev, dev->data->nb_rx_queues); + return 0; } @@ -581,14 +727,20 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->if_index = internals->if_index; dev_info->max_mac_addrs = 1; dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN; - dev_info->max_rx_queues = internals->nb_queues; - dev_info->max_tx_queues = internals->nb_queues; + dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES; + dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES; dev_info->min_rx_bufsize = 0; dev_info->pci_dev = NULL; dev_info->speed_capa = tap_dev_speed_capa(); + 
dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa(); + dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() | + dev_info->rx_queue_offload_capa; + dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa(); + dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() | + dev_info->tx_queue_offload_capa; } -static void +static int tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats) { unsigned int i, imax; @@ -597,9 +749,9 @@ tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats) unsigned long rx_nombuf = 0, ierrors = 0; const struct pmd_internals *pmd = dev->data->dev_private; - imax = (pmd->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? - pmd->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS; - + /* rx queue statistics */ + imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? + dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS; for (i = 0; i < imax; i++) { tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets; tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes; @@ -607,7 +759,13 @@ tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats) rx_bytes_total += tap_stats->q_ibytes[i]; rx_nombuf += pmd->rxq[i].stats.rx_nombuf; ierrors += pmd->rxq[i].stats.ierrors; + } + /* tx queue statistics */ + imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? + dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS; + + for (i = 0; i < imax; i++) { tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets; tap_stats->q_errors[i] = pmd->txq[i].stats.errs; tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes; @@ -623,6 +781,7 @@ tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats) tap_stats->opackets = tx_total; tap_stats->oerrors = tx_err_total; tap_stats->obytes = tx_bytes_total; + return 0; } static void @@ -631,7 +790,7 @@ tap_stats_reset(struct rte_eth_dev *dev) int i; struct pmd_internals *pmd = dev->data->dev_private; - for (i = 0; i < pmd->nb_queues; i++) { + for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) { pmd->rxq[i].stats.ipackets = 0; pmd->rxq[i].stats.ibytes = 0; pmd->rxq[i].stats.ierrors = 0; @@ -644,7 +803,7 @@ tap_stats_reset(struct rte_eth_dev *dev) } static void -tap_dev_close(struct rte_eth_dev *dev __rte_unused) +tap_dev_close(struct rte_eth_dev *dev) { int i; struct pmd_internals *internals = dev->data->dev_private; @@ -653,11 +812,21 @@ tap_dev_close(struct rte_eth_dev *dev __rte_unused) tap_flow_flush(dev, NULL); tap_flow_implicit_flush(internals, NULL); - for (i = 0; i < internals->nb_queues; i++) { - if (internals->rxq[i].fd != -1) + for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) { + if (internals->rxq[i].fd != -1) { close(internals->rxq[i].fd); - internals->rxq[i].fd = -1; - internals->txq[i].fd = -1; + internals->rxq[i].fd = -1; + } + if (internals->txq[i].fd != -1) { + close(internals->txq[i].fd); + internals->txq[i].fd = -1; + } + } + + if (internals->remote_if_index) { + /* Restore initial remote state */ + ioctl(internals->ioctl_sock, SIOCSIFFLAGS, + &internals->remote_initial_flags); } } @@ -718,7 +887,7 @@ tap_promisc_enable(struct rte_eth_dev *dev) dev->data->promiscuous = 1; tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE); - if (pmd->remote_if_index) + if (pmd->remote_if_index && !pmd->flow_isolate) tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC); } @@ -730,7 +899,7 @@ tap_promisc_disable(struct rte_eth_dev *dev) dev->data->promiscuous = 0; tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE); - if (pmd->remote_if_index) + if (pmd->remote_if_index && 
!pmd->flow_isolate) tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC); } @@ -742,7 +911,7 @@ tap_allmulti_enable(struct rte_eth_dev *dev) dev->data->all_multicast = 1; tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE); - if (pmd->remote_if_index) + if (pmd->remote_if_index && !pmd->flow_isolate) tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI); } @@ -754,114 +923,108 @@ tap_allmulti_disable(struct rte_eth_dev *dev) dev->data->all_multicast = 0; tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE); - if (pmd->remote_if_index) + if (pmd->remote_if_index && !pmd->flow_isolate) tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI); } - static void tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct pmd_internals *pmd = dev->data->dev_private; + enum ioctl_mode mode = LOCAL_ONLY; struct ifreq ifr; if (is_zero_ether_addr(mac_addr)) { RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n", - dev->data->name); + dev->device->name); return; } /* Check the actual current MAC address on the tap netdevice */ - if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) != 0) { - RTE_LOG(ERR, PMD, - "%s: couldn't check current tap MAC address\n", - dev->data->name); + if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) return; - } if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data, mac_addr)) return; - + /* Check the current MAC address on the remote */ + if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) + return; + if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data, + mac_addr)) + mode = LOCAL_AND_REMOTE; ifr.ifr_hwaddr.sa_family = AF_LOCAL; rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN); - if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, LOCAL_AND_REMOTE) < 0) + if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode) < 0) return; rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN); - if (pmd->remote_if_index) { + if (pmd->remote_if_index && !pmd->flow_isolate) { /* Replace MAC redirection rule after a MAC change */ if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) { RTE_LOG(ERR, PMD, "%s: Couldn't delete MAC redirection rule\n", - dev->data->name); + dev->device->name); return; } if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0) RTE_LOG(ERR, PMD, "%s: Couldn't add MAC redirection rule\n", - dev->data->name); + dev->device->name); } } static int tap_setup_queue(struct rte_eth_dev *dev, struct pmd_internals *internals, - uint16_t qid) + uint16_t qid, + int is_rx) { + int *fd; + int *other_fd; + const char *dir; struct pmd_internals *pmd = dev->data->dev_private; struct rx_queue *rx = &internals->rxq[qid]; struct tx_queue *tx = &internals->txq[qid]; - int fd; - fd = rx->fd; - if (fd < 0) { - fd = tx->fd; - if (fd < 0) { - RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n", - pmd->name, qid); - fd = tun_alloc(pmd, qid); - if (fd < 0) { - RTE_LOG(ERR, PMD, "tun_alloc(%s, %d) failed\n", - pmd->name, qid); - return -1; - } - if (qid == 0) { - struct ifreq ifr; - - ifr.ifr_mtu = dev->data->mtu; - if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, - LOCAL_AND_REMOTE) < 0) { - close(fd); - return -1; - } - } + if (is_rx) { + fd = &rx->fd; + other_fd = &tx->fd; + dir = "rx"; + } else { + fd = &tx->fd; + other_fd = &rx->fd; + dir = "tx"; + } + if (*fd != -1) { + /* fd for this queue already exists */ + RTE_LOG(DEBUG, PMD, "%s: fd %d for %s queue qid %d exists\n", + pmd->name, *fd, dir, qid); + } else if (*other_fd != -1) { + /* Only other_fd exists. 
dup it */ + *fd = dup(*other_fd); + if (*fd < 0) { + *fd = -1; + RTE_LOG(ERR, PMD, "%s: dup() failed.\n", + pmd->name); + return -1; } + RTE_LOG(DEBUG, PMD, "%s: dup fd %d for %s queue qid %d (%d)\n", + pmd->name, *other_fd, dir, qid, *fd); + } else { + /* Both RX and TX fds do not exist (equal -1). Create fd */ + *fd = tun_alloc(pmd); + if (*fd < 0) { + *fd = -1; /* restore original value */ + RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n", + pmd->name); + return -1; + } + RTE_LOG(DEBUG, PMD, "%s: add %s queue for qid %d fd %d\n", + pmd->name, dir, qid, *fd); } - rx->fd = fd; - tx->fd = fd; tx->mtu = &dev->data->mtu; rx->rxmode = &dev->data->dev_conf.rxmode; - return fd; -} - -static int -rx_setup_queue(struct rte_eth_dev *dev, - struct pmd_internals *internals, - uint16_t qid) -{ - dev->data->rx_queues[qid] = &internals->rxq[qid]; - - return tap_setup_queue(dev, internals, qid); -} - -static int -tx_setup_queue(struct rte_eth_dev *dev, - struct pmd_internals *internals, - uint16_t qid) -{ - dev->data->tx_queues[qid] = &internals->txq[qid]; - - return tap_setup_queue(dev, internals, qid); + return *fd; } static int @@ -875,34 +1038,50 @@ tap_rx_queue_setup(struct rte_eth_dev *dev, struct pmd_internals *internals = dev->data->dev_private; struct rx_queue *rxq = &internals->rxq[rx_queue_id]; struct rte_mbuf **tmp = &rxq->pool; - struct iovec (*iovecs)[nb_rx_desc + 1]; + long iov_max = sysconf(_SC_IOV_MAX); + uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1); + struct iovec (*iovecs)[nb_desc + 1]; int data_off = RTE_PKTMBUF_HEADROOM; int ret = 0; int fd; int i; - if ((rx_queue_id >= internals->nb_queues) || !mp) { + if (rx_queue_id >= dev->data->nb_rx_queues || !mp) { RTE_LOG(WARNING, PMD, - "nb_queues %d too small or mempool NULL\n", - internals->nb_queues); + "nb_rx_queues %d too small or mempool NULL\n", + dev->data->nb_rx_queues); return -1; } + /* Verify application offloads are valid for our port and queue. 
*/ + if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) { + rte_errno = ENOTSUP; + RTE_LOG(ERR, PMD, + "%p: Rx queue offloads 0x%" PRIx64 + " don't match port offloads 0x%" PRIx64 + " or supported offloads 0x%" PRIx64 "\n", + (void *)dev, rx_conf->offloads, + dev->data->dev_conf.rxmode.offloads, + (tap_rx_offload_get_port_capa() | + tap_rx_offload_get_queue_capa())); + return -rte_errno; + } rxq->mp = mp; rxq->trigger_seen = 1; /* force initial burst */ rxq->in_port = dev->data->port_id; - rxq->nb_rx_desc = nb_rx_desc; - iovecs = rte_zmalloc_socket(dev->data->name, sizeof(*iovecs), 0, + rxq->nb_rx_desc = nb_desc; + iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0, socket_id); if (!iovecs) { RTE_LOG(WARNING, PMD, "%s: Couldn't allocate %d RX descriptors\n", - dev->data->name, nb_rx_desc); + dev->device->name, nb_desc); return -ENOMEM; } rxq->iovecs = iovecs; - fd = rx_setup_queue(dev, internals, rx_queue_id); + dev->data->rx_queues[rx_queue_id] = rxq; + fd = tap_setup_queue(dev, internals, rx_queue_id, 1); if (fd == -1) { ret = fd; goto error; @@ -911,12 +1090,12 @@ tap_rx_queue_setup(struct rte_eth_dev *dev, (*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi); (*rxq->iovecs)[0].iov_base = &rxq->pi; - for (i = 1; i <= nb_rx_desc; i++) { + for (i = 1; i <= nb_desc; i++) { *tmp = rte_pktmbuf_alloc(rxq->mp); if (!*tmp) { RTE_LOG(WARNING, PMD, "%s: couldn't allocate memory for queue %d\n", - dev->data->name, rx_queue_id); + dev->device->name, rx_queue_id); ret = -ENOMEM; goto error; } @@ -945,20 +1124,46 @@ tap_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused, unsigned int socket_id __rte_unused, - const struct rte_eth_txconf *tx_conf __rte_unused) + const struct rte_eth_txconf *tx_conf) { struct pmd_internals *internals = dev->data->dev_private; + struct tx_queue *txq; int ret; - if (tx_queue_id >= internals->nb_queues) + if (tx_queue_id >= dev->data->nb_tx_queues) return -1; - - ret = tx_setup_queue(dev, internals, tx_queue_id); + dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id]; + txq = dev->data->tx_queues[tx_queue_id]; + /* + * Don't verify port offloads for application which + * use the old API. + */ + if (tx_conf != NULL && + !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) { + if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) { + txq->csum = !!(tx_conf->offloads & + (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM)); + } else { + rte_errno = ENOTSUP; + RTE_LOG(ERR, PMD, + "%p: Tx queue offloads 0x%" PRIx64 + " don't match port offloads 0x%" PRIx64 + " or supported offloads 0x%" PRIx64, + (void *)dev, tx_conf->offloads, + dev->data->dev_conf.txmode.offloads, + tap_tx_offload_get_port_capa()); + return -rte_errno; + } + } + ret = tap_setup_queue(dev, internals, tx_queue_id, 0); if (ret == -1) return -1; - - RTE_LOG(DEBUG, PMD, " TX TAP device name %s, qid %d on fd %d\n", - internals->name, tx_queue_id, internals->txq[tx_queue_id].fd); + RTE_LOG(DEBUG, PMD, + " TX TAP device name %s, qid %d on fd %d csum %s\n", + internals->name, tx_queue_id, internals->txq[tx_queue_id].fd, + txq->csum ? 
"on" : "off"); return 0; } @@ -1009,34 +1214,49 @@ tap_dev_intr_handler(void *cb_arg) struct rte_eth_dev *dev = cb_arg; struct pmd_internals *pmd = dev->data->dev_private; - nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev); + tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev); } static int -tap_intr_handle_set(struct rte_eth_dev *dev, int set) +tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set) { struct pmd_internals *pmd = dev->data->dev_private; /* In any case, disable interrupt if the conf is no longer there. */ if (!dev->data->dev_conf.intr_conf.lsc) { - if (pmd->intr_handle.fd != -1) - nl_final(pmd->intr_handle.fd); - rte_intr_callback_unregister( - &pmd->intr_handle, tap_dev_intr_handler, dev); + if (pmd->intr_handle.fd != -1) { + tap_nl_final(pmd->intr_handle.fd); + rte_intr_callback_unregister(&pmd->intr_handle, + tap_dev_intr_handler, dev); + } return 0; } if (set) { - pmd->intr_handle.fd = nl_init(RTMGRP_LINK); + pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK); if (unlikely(pmd->intr_handle.fd == -1)) return -EBADF; return rte_intr_callback_register( &pmd->intr_handle, tap_dev_intr_handler, dev); } - nl_final(pmd->intr_handle.fd); + tap_nl_final(pmd->intr_handle.fd); return rte_intr_callback_unregister(&pmd->intr_handle, tap_dev_intr_handler, dev); } +static int +tap_intr_handle_set(struct rte_eth_dev *dev, int set) +{ + int err; + + err = tap_lsc_intr_handle_set(dev, set); + if (err) + return err; + err = tap_rx_intr_vec_set(dev, set); + if (err && set) + tap_lsc_intr_handle_set(dev, 0); + return err; +} + static const uint32_t* tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) { @@ -1113,32 +1333,15 @@ static const struct eth_dev_ops ops = { .filter_ctrl = tap_dev_filter_ctrl, }; -static int -tap_kernel_support(struct pmd_internals *pmd) -{ - struct utsname utsname; - int ver[3]; - - if (uname(&utsname) == -1 || - sscanf(utsname.release, "%d.%d.%d", - &ver[0], &ver[1], &ver[2]) != 3) - return 0; - if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >= FLOWER_KERNEL_VERSION) - pmd->flower_support = 1; - if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >= - FLOWER_VLAN_KERNEL_VERSION) - pmd->flower_vlan_support = 1; - return 1; -} - static int eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, - char *remote_iface) + char *remote_iface, int fixed_mac_type) { int numa_node = rte_socket_id(); struct rte_eth_dev *dev; struct pmd_internals *pmd; struct rte_eth_dev_data *data; + struct ifreq ifr; int i; RTE_LOG(DEBUG, PMD, " TAP device on numa %u\n", rte_socket_id()); @@ -1146,18 +1349,18 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node); if (!data) { RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n"); - goto error_exit; + goto error_exit_nodev; } dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd)); if (!dev) { RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n"); - goto error_exit; + goto error_exit_nodev; } pmd = dev->data->dev_private; + pmd->dev = dev; snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name); - pmd->nb_queues = RTE_PMD_TAP_MAX_QUEUES; pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0); if (pmd->ioctl_sock == -1) { @@ -1170,14 +1373,14 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, /* Setup some default values */ rte_memcpy(data, dev->data, sizeof(*data)); data->dev_private = pmd; - data->dev_flags = RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC; + data->dev_flags = RTE_ETH_DEV_INTR_LSC; data->numa_node = numa_node; - 
data->drv_name = pmd_tap_drv.driver.name; data->dev_link = pmd_link; data->mac_addrs = &pmd->eth_addr; - data->nb_rx_queues = pmd->nb_queues; - data->nb_tx_queues = pmd->nb_queues; + /* Set the number of RX and TX queues */ + data->nb_rx_queues = 0; + data->nb_tx_queues = 0; dev->data = data; dev->dev_ops = &ops; @@ -1186,6 +1389,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, pmd->intr_handle.type = RTE_INTR_HANDLE_EXT; pmd->intr_handle.fd = -1; + dev->intr_handle = &pmd->intr_handle; /* Presetup the fds to -1 as being not valid */ for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) { @@ -1193,41 +1397,141 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name, pmd->txq[i].fd = -1; } - tap_kernel_support(pmd); - if (!pmd->flower_support) - return 0; - LIST_INIT(&pmd->flows); + if (fixed_mac_type) { + /* fixed mac = 00:64:74:61:70: */ + static int iface_idx; + char mac[ETHER_ADDR_LEN] = "\0dtap"; + + mac[ETHER_ADDR_LEN - 1] = iface_idx++; + rte_memcpy(&pmd->eth_addr, mac, ETHER_ADDR_LEN); + } else { + eth_random_addr((uint8_t *)&pmd->eth_addr); + } + + /* Immediately create the netdevice (this will create the 1st queue). */ + /* rx queue */ + if (tap_setup_queue(dev, pmd, 0, 1) == -1) + goto error_exit; + /* tx queue */ + if (tap_setup_queue(dev, pmd, 0, 0) == -1) + goto error_exit; + + ifr.ifr_mtu = dev->data->mtu; + if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0) + goto error_exit; + + memset(&ifr, 0, sizeof(struct ifreq)); + ifr.ifr_hwaddr.sa_family = AF_LOCAL; + rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, ETHER_ADDR_LEN); + if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) + goto error_exit; + /* - * If no netlink socket can be created, then it will fail when - * creating/destroying flow rules. 
+ * Set up everything related to rte_flow: + * - netlink socket + * - tap / remote if_index + * - mandatory QDISCs + * - rte_flow actual/implicit lists + * - implicit rules */ - pmd->nlsk_fd = nl_init(0); - if (strlen(remote_iface)) { - struct ifreq ifr; + pmd->nlsk_fd = tap_nl_init(0); + if (pmd->nlsk_fd == -1) { + RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n", + pmd->name); + goto disable_rte_flow; + } + pmd->if_index = if_nametoindex(pmd->name); + if (!pmd->if_index) { + RTE_LOG(ERR, PMD, "%s: failed to get if_index.\n", pmd->name); + goto disable_rte_flow; + } + if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) { + RTE_LOG(ERR, PMD, "%s: failed to create multiq qdisc.\n", + pmd->name); + goto disable_rte_flow; + } + if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) { + RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n", + pmd->name); + goto disable_rte_flow; + } + LIST_INIT(&pmd->flows); + if (strlen(remote_iface)) { pmd->remote_if_index = if_nametoindex(remote_iface); - snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN, - "%s", remote_iface); if (!pmd->remote_if_index) { - RTE_LOG(ERR, PMD, "Could not find %s ifindex: " - "remote interface will remain unconfigured\n", - remote_iface); - return 0; + RTE_LOG(ERR, PMD, "%s: failed to get %s if_index.\n", + pmd->name, remote_iface); + goto error_remote; } + snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN, + "%s", remote_iface); + + /* Save state of remote device */ + tap_ioctl(pmd, SIOCGIFFLAGS, &pmd->remote_initial_flags, 0, REMOTE_ONLY); + + /* Replicate remote MAC address */ if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) { - RTE_LOG(ERR, PMD, "Could not get remote MAC address\n"); - goto error_exit; + RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n", + pmd->name, pmd->remote_iface); + goto error_remote; } rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN); - } else { - eth_random_addr((uint8_t *)&pmd->eth_addr); + /* The desired MAC is already in ifreq after SIOCGIFHWADDR. */ + if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) { + RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n", + pmd->name, remote_iface); + goto error_remote; + } + + /* + * Flush usually returns negative value because it tries to + * delete every QDISC (and on a running device, one QDISC at + * least is needed). Ignore negative return value. 
+ */ + qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index); + if (qdisc_create_ingress(pmd->nlsk_fd, + pmd->remote_if_index) < 0) { + RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n", + pmd->remote_iface); + goto error_remote; + } + LIST_INIT(&pmd->implicit_flows); + if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 || + tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 || + tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 || + tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) { + RTE_LOG(ERR, PMD, + "%s: failed to create implicit rules.\n", + pmd->name); + goto error_remote; + } } return 0; +disable_rte_flow: + RTE_LOG(ERR, PMD, " Disabling rte flow support: %s(%d)\n", + strerror(errno), errno); + if (strlen(remote_iface)) { + RTE_LOG(ERR, PMD, "Remote feature requires flow support.\n"); + goto error_exit; + } + return 0; + +error_remote: + RTE_LOG(ERR, PMD, " Can't set up remote feature: %s(%d)\n", + strerror(errno), errno); + tap_flow_implicit_flush(pmd, NULL); + error_exit: - RTE_LOG(DEBUG, PMD, "TAP Unable to initialize %s\n", + if (pmd->ioctl_sock > 0) + close(pmd->ioctl_sock); + rte_eth_dev_release_port(dev); + +error_exit_nodev: + RTE_LOG(ERR, PMD, "TAP Unable to initialize %s\n", rte_vdev_device_name(vdev)); rte_free(data); @@ -1250,16 +1554,6 @@ set_interface_name(const char *key __rte_unused, return 0; } -static int -set_interface_speed(const char *key __rte_unused, - const char *value, - void *extra_args) -{ - *(int *)extra_args = (value) ? atoi(value) : ETH_SPEED_NUM_10G; - - return 0; -} - static int set_remote_iface(const char *key __rte_unused, const char *value, @@ -1273,6 +1567,17 @@ set_remote_iface(const char *key __rte_unused, return 0; } +static int +set_mac_type(const char *key __rte_unused, + const char *value, + void *extra_args) +{ + if (value && + !strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) + *(int *)extra_args = 1; + return 0; +} + /* Open a TAP interface device. 
  */
 static int
@@ -1284,6 +1589,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
 	int speed;
 	char tap_name[RTE_ETH_NAME_MAX_LEN];
 	char remote_iface[RTE_ETH_NAME_MAX_LEN];
+	int fixed_mac_type = 0;
 
 	name = rte_vdev_device_name(dev);
 	params = rte_vdev_device_args(dev);
@@ -1294,19 +1600,10 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
 	memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
 
 	if (params && (params[0] != '\0')) {
-		RTE_LOG(DEBUG, PMD, "paramaters (%s)\n", params);
+		RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);
 
 		kvlist = rte_kvargs_parse(params, valid_arguments);
 		if (kvlist) {
-			if (rte_kvargs_count(kvlist, ETH_TAP_SPEED_ARG) == 1) {
-				ret = rte_kvargs_process(kvlist,
-							 ETH_TAP_SPEED_ARG,
-							 &set_interface_speed,
-							 &speed);
-				if (ret == -1)
-					goto leave;
-			}
-
 			if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
 				ret = rte_kvargs_process(kvlist,
 							 ETH_TAP_IFACE_ARG,
@@ -1324,6 +1621,15 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
 				if (ret == -1)
 					goto leave;
 			}
+
+			if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
+				ret = rte_kvargs_process(kvlist,
+							 ETH_TAP_MAC_ARG,
+							 &set_mac_type,
+							 &fixed_mac_type);
+				if (ret == -1)
+					goto leave;
+			}
 		}
 	}
 	pmd_link.link_speed = speed;
@@ -1331,7 +1637,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
 	RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
 		name, tap_name);
 
-	ret = eth_dev_tap_create(dev, tap_name, remote_iface);
+	ret = eth_dev_tap_create(dev, tap_name, remote_iface, fixed_mac_type);
 
 leave:
 	if (ret == -1) {
@@ -1362,14 +1668,21 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
 		return 0;
 
 	internals = eth_dev->data->dev_private;
-	if (internals->flower_support && internals->nlsk_fd) {
+	if (internals->nlsk_fd) {
 		tap_flow_flush(eth_dev, NULL);
 		tap_flow_implicit_flush(internals, NULL);
-		nl_final(internals->nlsk_fd);
+		tap_nl_final(internals->nlsk_fd);
 	}
-	for (i = 0; i < internals->nb_queues; i++)
-		if (internals->rxq[i].fd != -1)
+	for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
+		if (internals->rxq[i].fd != -1) {
 			close(internals->rxq[i].fd);
+			internals->rxq[i].fd = -1;
+		}
+		if (internals->txq[i].fd != -1) {
+			close(internals->txq[i].fd);
+			internals->txq[i].fd = -1;
+		}
+	}
 
 	close(internals->ioctl_sock);
 	rte_free(eth_dev->data->dev_private);
@@ -1386,4 +1699,7 @@ static struct rte_vdev_driver pmd_tap_drv = {
 };
 RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
 RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
-RTE_PMD_REGISTER_PARAM_STRING(net_tap, "iface=,speed=N");
+RTE_PMD_REGISTER_PARAM_STRING(net_tap,
+			      ETH_TAP_IFACE_ARG "= "
+			      ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_FIXED " "
+			      ETH_TAP_REMOTE_ARG "=");
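The devargs registered above (iface, remote, and the new mac=fixed option) are consumed in rte_pmd_tap_probe() through rte_kvargs_parse() and rte_kvargs_process(). As a rough illustration of how an application would exercise this patch, the sketch below brings up one tap vdev with those options; the vdev instance name net_tap0, the interface names dtap0 and eth0, and the minimal EAL argument list are assumptions made for the example, not values taken from the patch.

/* Minimal sketch (not part of the patch): probe the tap PMD with the
 * devargs handled above. "net_tap0", "dtap0" and "eth0" are placeholder
 * names chosen for illustration.
 */
#include <rte_eal.h>
#include <rte_ethdev.h>

int
main(void)
{
	char *eal_argv[] = {
		"tap_example",
		"--no-pci",
		"--vdev=net_tap0,iface=dtap0,remote=eth0,mac=fixed",
	};
	int eal_argc = 3;

	if (rte_eal_init(eal_argc, eal_argv) < 0)
		return -1;
	/* Port 0 is now the tap device; the Rx/Tx checksum offloads added by
	 * this patch are advertised through rte_eth_dev_info_get().
	 */
	return 0;
}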