net: add rte prefix to ether structures
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index 54d7984..e4af7bc 100644
--- a/examples/tep_termination/vxlan_setup.c
+++ b/examples/tep_termination/vxlan_setup.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
  */
 
 #include <getopt.h>
@@ -49,7 +20,7 @@
 #include <rte_tcp.h>
 
 #include "main.h"
-#include "rte_virtio_net.h"
+#include "rte_vhost.h"
 #include "vxlan.h"
 #include "vxlan_setup.h"
 
 #define RTE_TEST_RX_DESC_DEFAULT 1024
 #define RTE_TEST_TX_DESC_DEFAULT 512
 
+/* Default inner VLAN ID */
+#define INNER_VLAN_ID 100
+
 /* VXLAN device */
 struct vxlan_conf vxdev;
 
 struct ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
-struct ether_hdr app_l2_hdr[VXLAN_N_PORTS];
+struct rte_ether_hdr app_l2_hdr[VXLAN_N_PORTS];
 
 /* local VTEP IP address */
 uint8_t vxlan_multicast_ips[2][4] = { {239, 1, 1, 1 }, {239, 1, 2, 1 } };
@@ -86,18 +60,26 @@ uint8_t vxlan_overlay_ips[2][4] = { {192, 168, 10, 1}, {192, 168, 30, 1} };
 /* Remote VTEP MAC address */
 uint8_t peer_mac[6] = {0x00, 0x11, 0x01, 0x00, 0x00, 0x01};
 
+/* VXLAN RX filter type */
+uint8_t tep_filter_type[] = {RTE_TUNNEL_FILTER_IMAC_TENID,
+                       RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+                       RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,};
+
 /* Options for configuring ethernet port */
-static const struct rte_eth_conf port_conf = {
+static struct rte_eth_conf port_conf = {
        .rxmode = {
                .split_hdr_size = 0,
-               .header_split   = 0, /**< Header Split disabled */
-               .hw_ip_checksum = 0, /**< IP checksum offload disabled */
-               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
-               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
-               .hw_strip_crc   = 0, /**< CRC stripped by hardware */
        },
        .txmode = {
                .mq_mode = ETH_MQ_TX_NONE,
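+               /* Checksum and TSO offloads used on the VXLAN encapsulation path */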
+               .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+                            DEV_TX_OFFLOAD_UDP_CKSUM |
+                            DEV_TX_OFFLOAD_TCP_CKSUM |
+                            DEV_TX_OFFLOAD_SCTP_CKSUM |
+                            DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+                            DEV_TX_OFFLOAD_TCP_TSO |
+                            DEV_TX_OFFLOAD_MULTI_SEGS |
+                            DEV_TX_OFFLOAD_VXLAN_TNL_TSO),
        },
 };
 
@@ -121,18 +103,19 @@ const uint16_t tenant_id_conf[] = {
  * coming from the mbuf_pool passed as parameter
  */
 int
-vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 {
        int retval;
        uint16_t q;
        struct rte_eth_dev_info dev_info;
        uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
-       const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
-       const uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+       uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
+       uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
        struct rte_eth_udp_tunnel tunnel_udp;
        struct rte_eth_rxconf *rxconf;
        struct rte_eth_txconf *txconf;
        struct vxlan_conf *pconf = &vxdev;
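+       /* Per-port copy of the template config; offload flags are adjusted below */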
+       struct rte_eth_conf local_port_conf = port_conf;
 
        pconf->dst_port = udp_port;
 
@@ -146,19 +129,27 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 
        rxconf = &dev_info.default_rxconf;
        txconf = &dev_info.default_txconf;
-       txconf->txq_flags = 0;
 
-       if (port >= rte_eth_dev_count())
+       if (!rte_eth_dev_is_valid_port(port))
                return -1;
 
        rx_rings = nb_devices;
-
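+       /* Let the PMD bulk-free TX mbufs back to their pool when supported */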
+       if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+               local_port_conf.txmode.offloads |=
+                       DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        /* Configure ethernet device. */
-       retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+       retval = rte_eth_dev_configure(port, rx_rings, tx_rings,
+                                      &local_port_conf);
+       if (retval != 0)
+               return retval;
+
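+       /* Clamp the requested descriptor counts to the device's limits */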
+       retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+                       &tx_ring_size);
        if (retval != 0)
                return retval;
 
        /* Setup the queues. */
+       rxconf->offloads = local_port_conf.rxmode.offloads;
        for (q = 0; q < rx_rings; q++) {
                retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
                                                rte_eth_dev_socket_id(port),
@@ -167,6 +158,7 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
                if (retval < 0)
                        return retval;
        }
+       txconf->offloads = local_port_conf.txmode.offloads;
        for (q = 0; q < tx_rings; q++) {
                retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
                                                rte_eth_dev_socket_id(port),
@@ -183,13 +175,13 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
        /* Configure UDP port for UDP tunneling */
        tunnel_udp.udp_port = udp_port;
        tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
-       retval = rte_eth_dev_udp_tunnel_add(port, &tunnel_udp);
+       retval = rte_eth_dev_udp_tunnel_port_add(port, &tunnel_udp);
        if (retval < 0)
                return retval;
        rte_eth_macaddr_get(port, &ports_eth_addr[port]);
        RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
                        " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
-                       (unsigned)port,
+                       port,
                        ports_eth_addr[port].addr_bytes[0],
                        ports_eth_addr[port].addr_bytes[1],
                        ports_eth_addr[port].addr_bytes[2],
@@ -197,19 +189,33 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
                        ports_eth_addr[port].addr_bytes[4],
                        ports_eth_addr[port].addr_bytes[5]);
 
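+       /* If a TSO segment size was set, warn when the port lacks hardware TSO */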
+       if (tso_segsz != 0) {
+               struct rte_eth_dev_info dev_info;
+
+               rte_eth_dev_info_get(port, &dev_info);
+               if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0)
+                       RTE_LOG(WARNING, PORT,
+                               "hardware TSO offload is not supported\n");
+       }
        return 0;
 }
 
 static int
 vxlan_rx_process(struct rte_mbuf *pkt)
 {
-       return decapsulation(pkt);
+       int ret = 0;
+
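+       /* Decapsulate only when RX decapsulation is enabled */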
+       if (rx_decap)
+               ret = decapsulation(pkt);
+
+       return ret;
 }
 
 static void
 vxlan_tx_process(uint8_t queue_id, struct rte_mbuf *pkt)
 {
-       encapsulation(pkt, queue_id);
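+       /* Encapsulate only when TX encapsulation is enabled */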
+       if (tx_encap)
+               encapsulation(pkt, queue_id);
+
        return;
 }
 
@@ -220,27 +226,28 @@ vxlan_tx_process(uint8_t queue_id, struct rte_mbuf *pkt)
 int
 vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
 {
-       int i;
-       struct ether_hdr *pkt_hdr;
-       struct virtio_net *dev = vdev->dev;
-       uint64_t portid = dev->device_fh;
+       int i, ret;
+       struct rte_ether_hdr *pkt_hdr;
+       uint64_t portid = vdev->vid;
        struct ipv4_hdr *ip;
 
-       if (unlikely(portid > VXLAN_N_PORTS)) {
+       struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
+
+       if (unlikely(portid >= VXLAN_N_PORTS)) {
                RTE_LOG(INFO, VHOST_DATA,
-                       "(%"PRIu64") WARNING: Not configuring device,"
+                       "(%d) WARNING: Not configuring device,"
                        "as already have %d ports for VXLAN.",
-                       dev->device_fh, VXLAN_N_PORTS);
+                       vdev->vid, VXLAN_N_PORTS);
                return -1;
        }
 
        /* Learn MAC address of guest device from packet */
-       pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+       pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
        if (is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) {
                RTE_LOG(INFO, VHOST_DATA,
-                       "(%"PRIu64") WARNING: This device is using an existing"
+                       "(%d) WARNING: This device is using an existing"
                        " MAC address and has not been registered.\n",
-                       dev->device_fh);
+                       vdev->vid);
                return -1;
        }
 
@@ -251,6 +258,34 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
                vxdev.port[portid].peer_mac.addr_bytes[i] = peer_mac[i];
        }
 
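+       /* Build a VXLAN tunnel filter so the NIC steers this guest's
+        * traffic (matched on inner MAC, VNI and optionally inner VLAN)
+        * to its RX queue.
+        */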
+       memset(&tunnel_filter_conf, 0,
+               sizeof(struct rte_eth_tunnel_filter_conf));
+
+       ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
+       tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];
+
+       /* inner MAC */
+       ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);
+
+       tunnel_filter_conf.queue_id = vdev->rx_q;
+       tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
+
+       if (tep_filter_type[filter_idx] == RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID)
+               tunnel_filter_conf.inner_vlan = INNER_VLAN_ID;
+
+       tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+       ret = rte_eth_dev_filter_ctrl(ports[0],
+               RTE_ETH_FILTER_TUNNEL,
+               RTE_ETH_FILTER_ADD,
+               &tunnel_filter_conf);
+       if (ret) {
+               RTE_LOG(ERR, VHOST_DATA,
+                       "%d Failed to add device MAC address to cloud filter\n",
+               vdev->rx_q);
+               return -1;
+       }
+
        /* Print out inner MAC and VNI info. */
        RTE_LOG(INFO, VHOST_DATA,
                "(%d) MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VNI %d registered\n",
@@ -306,9 +341,36 @@ void
 vxlan_unlink(struct vhost_dev *vdev)
 {
        unsigned i = 0, rx_count;
+       int ret;
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
 
        if (vdev->ready == DEVICE_RX) {
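+               /* Remove the tunnel filter installed by vxlan_link() */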
+               memset(&tunnel_filter_conf, 0,
+                       sizeof(struct rte_eth_tunnel_filter_conf));
+
+               ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
+               ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);
+               tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
+               tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];
+
+               if (tep_filter_type[filter_idx] ==
+                       RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID)
+                       tunnel_filter_conf.inner_vlan = INNER_VLAN_ID;
+
+               tunnel_filter_conf.queue_id = vdev->rx_q;
+               tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+               ret = rte_eth_dev_filter_ctrl(ports[0],
+                               RTE_ETH_FILTER_TUNNEL,
+                               RTE_ETH_FILTER_DELETE,
+                               &tunnel_filter_conf);
+               if (ret) {
+                       RTE_LOG(ERR, VHOST_DATA,
+                               "%d Failed to add device MAC address to cloud filter\n",
+                               vdev->rx_q);
+                       return;
+               }
                for (i = 0; i < ETHER_ADDR_LEN; i++)
                        vdev->mac_address.addr_bytes[i] = 0;
 
@@ -331,7 +393,7 @@ vxlan_unlink(struct vhost_dev *vdev)
 
 /* Transmit packets after encapsulating */
 int
-vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
+vxlan_tx_pkts(uint16_t port_id, uint16_t queue_id,
                struct rte_mbuf **tx_pkts, uint16_t nb_pkts) {
        int ret = 0;
        uint16_t i;
@@ -346,8 +408,7 @@ vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
 
 /* Check for decapsulation and pass packets directly to VIRTIO device */
 int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
-               uint32_t rx_count)
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts_burst, uint32_t rx_count)
 {
        uint32_t i = 0;
        uint32_t count = 0;
@@ -355,6 +416,16 @@ vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
        struct rte_mbuf *pkts_valid[rx_count];
 
        for (i = 0; i < rx_count; i++) {
+               if (enable_stats) {
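+                       /* Count packets the NIC flagged with bad IP/L4 checksums */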
+                       rte_atomic64_add(
+                               &dev_statistics[vid].rx_bad_ip_csum,
+                               (pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
+                               != 0);
+                       rte_atomic64_add(
+                               &dev_statistics[vid].rx_bad_ip_csum,
+                               (pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
+                               != 0);
+               }
                ret = vxlan_rx_process(pkts_burst[i]);
                if (unlikely(ret < 0))
                        continue;
@@ -363,6 +434,6 @@ vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
                        count++;
        }
 
-       ret = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_valid, count);
+       ret = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts_valid, count);
        return ret;
 }