X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Ftep_termination%2Fvxlan_setup.c;h=4b44ccc143078e67d4838038156649944a062526;hb=d0e160a00233b00ba6d242d5fc054438caae6873;hp=52e404c0b59b8b96af0186efc91e301b0a1ccff0;hpb=e2a1dd1275881ffcdcb9af0760ccea0e75f7c024;p=dpdk.git

diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index 52e404c0b5..4b44ccc143 100644
--- a/examples/tep_termination/vxlan_setup.c
+++ b/examples/tep_termination/vxlan_setup.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
  */
 
 #include 
@@ -49,7 +20,7 @@
 #include 
 
 #include "main.h"
-#include "rte_virtio_net.h"
+#include "rte_vhost.h"
 #include "vxlan.h"
 #include "vxlan_setup.h"
 
@@ -57,10 +28,7 @@
 
 #define UDP_HEADER_LEN 8
 #define VXLAN_HEADER_LEN 8
 
-#define IP_VERSION 0x40
-#define IP_HDRLEN  0x05 /* default IP header length == five 32-bits words. */
 #define IP_DEFTTL  64   /* from RFC 1340. */
-#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
 
 #define IP_DN_FRAGMENT_FLAG 0x0040
@@ -77,8 +45,8 @@
 /* VXLAN device */
 struct vxlan_conf vxdev;
 
-struct ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
-struct ether_hdr app_l2_hdr[VXLAN_N_PORTS];
+struct rte_ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
+struct rte_ether_hdr app_l2_hdr[VXLAN_N_PORTS];
 
 /* local VTEP IP address */
 uint8_t vxlan_multicast_ips[2][4] = { {239, 1, 1, 1 }, {239, 1, 2, 1 } };
@@ -95,17 +63,20 @@ uint8_t tep_filter_type[] = {RTE_TUNNEL_FILTER_IMAC_TENID,
 			RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,};
 
 /* Options for configuring ethernet port */
-static const struct rte_eth_conf port_conf = {
+static struct rte_eth_conf port_conf = {
 	.rxmode = {
 		.split_hdr_size = 0,
-		.header_split   = 0, /**< Header Split disabled */
-		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
-		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
-		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
-		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
 	},
 	.txmode = {
 		.mq_mode = ETH_MQ_TX_NONE,
+		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+			DEV_TX_OFFLOAD_UDP_CKSUM |
+			DEV_TX_OFFLOAD_TCP_CKSUM |
+			DEV_TX_OFFLOAD_SCTP_CKSUM |
+			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+			DEV_TX_OFFLOAD_TCP_TSO |
+			DEV_TX_OFFLOAD_MULTI_SEGS |
+			DEV_TX_OFFLOAD_VXLAN_TNL_TSO),
 	},
 };
@@ -129,22 +100,27 @@ const uint16_t tenant_id_conf[] = {
  * coming from the mbuf_pool passed as parameter
  */
 int
-vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 {
 	int retval;
 	uint16_t q;
 	struct rte_eth_dev_info dev_info;
 	uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
-	const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
-	const uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+	uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
+	uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
 	struct rte_eth_udp_tunnel tunnel_udp;
 	struct rte_eth_rxconf *rxconf;
 	struct rte_eth_txconf *txconf;
 	struct vxlan_conf *pconf = &vxdev;
+	struct rte_eth_conf local_port_conf = port_conf;
 
 	pconf->dst_port = udp_port;
 
-	rte_eth_dev_info_get(port, &dev_info);
+	retval = rte_eth_dev_info_get(port, &dev_info);
+	if (retval != 0)
+		rte_exit(EXIT_FAILURE,
+			"Error during getting device (port %u) info: %s\n",
+			port, strerror(-retval));
 
 	if (dev_info.max_rx_queues > MAX_QUEUES) {
 		rte_exit(EXIT_FAILURE,
@@ -154,19 +130,27 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 
 	rxconf = &dev_info.default_rxconf;
 	txconf = &dev_info.default_txconf;
-	txconf->txq_flags = 0;
 
-	if (port >= rte_eth_dev_count())
+	if (!rte_eth_dev_is_valid_port(port))
 		return -1;
 
 	rx_rings = nb_devices;
-
+	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		local_port_conf.txmode.offloads |=
+			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 	/* Configure ethernet device. */
-	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+	retval = rte_eth_dev_configure(port, rx_rings, tx_rings,
+				&local_port_conf);
+	if (retval != 0)
+		return retval;
+
+	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+			&tx_ring_size);
 	if (retval != 0)
 		return retval;
 
 	/* Setup the queues. */
+	rxconf->offloads = local_port_conf.rxmode.offloads;
 	for (q = 0; q < rx_rings; q++) {
 		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
 						rte_eth_dev_socket_id(port),
@@ -175,6 +159,7 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 		if (retval < 0)
 			return retval;
 	}
+	txconf->offloads = local_port_conf.txmode.offloads;
 	for (q = 0; q < tx_rings; q++) {
 		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
 						rte_eth_dev_socket_id(port),
@@ -194,10 +179,13 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 	retval = rte_eth_dev_udp_tunnel_port_add(port, &tunnel_udp);
 	if (retval < 0)
 		return retval;
-	rte_eth_macaddr_get(port, &ports_eth_addr[port]);
+	retval = rte_eth_macaddr_get(port, &ports_eth_addr[port]);
+	if (retval < 0)
+		return retval;
+
 	RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
 			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
-			(unsigned)port,
+			port,
 			ports_eth_addr[port].addr_bytes[0],
 			ports_eth_addr[port].addr_bytes[1],
 			ports_eth_addr[port].addr_bytes[2],
@@ -206,8 +194,6 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 			ports_eth_addr[port].addr_bytes[5]);
 
 	if (tso_segsz != 0) {
-		struct rte_eth_dev_info dev_info;
-		rte_eth_dev_info_get(port, &dev_info);
 		if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0)
 			RTE_LOG(WARNING, PORT,
 				"hardware TSO offload is not supported\n");
@@ -243,32 +229,31 @@ int
 vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
 {
 	int i, ret;
-	struct ether_hdr *pkt_hdr;
-	struct virtio_net *dev = vdev->dev;
-	uint64_t portid = dev->vid;
-	struct ipv4_hdr *ip;
+	struct rte_ether_hdr *pkt_hdr;
+	uint64_t portid = vdev->vid;
+	struct rte_ipv4_hdr *ip;
 
 	struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
 
-	if (unlikely(portid > VXLAN_N_PORTS)) {
+	if (unlikely(portid >= VXLAN_N_PORTS)) {
 		RTE_LOG(INFO, VHOST_DATA,
 			"(%d) WARNING: Not configuring device,"
 			"as already have %d ports for VXLAN.",
-			dev->vid, VXLAN_N_PORTS);
+			vdev->vid, VXLAN_N_PORTS);
 		return -1;
 	}
 
 	/* Learn MAC address of guest device from packet */
-	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
-	if (is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) {
+	pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+	if (rte_is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) {
 		RTE_LOG(INFO, VHOST_DATA,
 			"(%d) WARNING: This device is using an existing"
 			" MAC address and has not been registered.\n",
-			dev->vid);
+			vdev->vid);
 		return -1;
 	}
 
-	for (i = 0; i < ETHER_ADDR_LEN; i++) {
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
 		vdev->mac_address.addr_bytes[i] =
 			vxdev.port[portid].vport_mac.addr_bytes[i] =
 				pkt_hdr->s_addr.addr_bytes[i];
@@ -278,11 +263,11 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
 	memset(&tunnel_filter_conf, 0,
 		sizeof(struct rte_eth_tunnel_filter_conf));
 
-	ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
+	rte_ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
 	tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];
 
 	/* inner MAC */
-	ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);
+	rte_ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);
 
 	tunnel_filter_conf.queue_id = vdev->rx_q;
 	tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
@@ -326,14 +311,14 @@
 
 	}
 
 	vxdev.out_key = tenant_id_conf[vdev->rx_q];
-	ether_addr_copy(&vxdev.port[portid].peer_mac,
+	rte_ether_addr_copy(&vxdev.port[portid].peer_mac,
 			&app_l2_hdr[portid].d_addr);
-	ether_addr_copy(&ports_eth_addr[0],
+	rte_ether_addr_copy(&ports_eth_addr[0],
 			&app_l2_hdr[portid].s_addr);
-	app_l2_hdr[portid].ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+	app_l2_hdr[portid].ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 	ip = &app_ip_hdr[portid];
-	ip->version_ihl = IP_VHL_DEF;
+	ip->version_ihl = RTE_IPV4_VHL_DEF;
 	ip->type_of_service = 0;
 	ip->total_length = 0;
 	ip->packet_id = 0;
@@ -366,8 +351,10 @@ vxlan_unlink(struct vhost_dev *vdev)
 
 	memset(&tunnel_filter_conf, 0,
 		sizeof(struct rte_eth_tunnel_filter_conf));
 
-	ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
-	ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);
+	rte_ether_addr_copy(&ports_eth_addr[0],
+			&tunnel_filter_conf.outer_mac);
+	rte_ether_addr_copy(&vdev->mac_address,
+			&tunnel_filter_conf.inner_mac);
 	tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
 	tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];
@@ -388,7 +375,7 @@ vxlan_unlink(struct vhost_dev *vdev)
 			vdev->rx_q);
 		return;
 	}
-	for (i = 0; i < ETHER_ADDR_LEN; i++)
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
 		vdev->mac_address.addr_bytes[i] = 0;
 
 	/* Clear out the receive buffers */
@@ -410,7 +397,7 @@
 /* Transmit packets after encapsulating */
 int
-vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
+vxlan_tx_pkts(uint16_t port_id, uint16_t queue_id,
 		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	int ret = 0;
 	uint16_t i;
@@ -425,8 +412,7 @@
 
 /* Check for decapsulation and pass packets directly to VIRTIO device */
 int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
-		uint32_t rx_count)
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts_burst, uint32_t rx_count)
 {
 	uint32_t i = 0;
 	uint32_t count = 0;
@@ -436,11 +422,11 @@
 	for (i = 0; i < rx_count; i++) {
 		if (enable_stats) {
 			rte_atomic64_add(
-				&dev_statistics[dev->vid].rx_bad_ip_csum,
+				&dev_statistics[vid].rx_bad_ip_csum,
 				(pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
 				!= 0);
 			rte_atomic64_add(
-				&dev_statistics[dev->vid].rx_bad_ip_csum,
+				&dev_statistics[vid].rx_bad_ip_csum,
 				(pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
 				!= 0);
 		}
@@ -452,6 +438,6 @@
 		count++;
 	}
 
-	ret = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_valid, count);
+	ret = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts_valid, count);
 	return ret;
 }
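
The core of this diff is the move of vxlan_port_init() to the ethdev offload API: requested Tx offloads are declared up front in rte_eth_conf.txmode.offloads, optional capabilities such as DEV_TX_OFFLOAD_MBUF_FAST_FREE are enabled only after checking dev_info.tx_offload_capa, the return values of rte_eth_dev_info_get() and rte_eth_macaddr_get() are now checked, descriptor counts go through rte_eth_dev_adjust_nb_rx_tx_desc(), and the negotiated port-level offloads are echoed into each queue's rxconf/txconf before queue setup. The sketch below condenses that flow for a single Rx/Tx queue pair. It is illustrative only: it assumes a DPDK release of the same era as this patch (pre-21.11 DEV_*_OFFLOAD_* flag names), and init_port_with_offloads() is a hypothetical helper, not part of the patch.

#include <stdint.h>

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical helper condensing the init flow this patch adopts. */
static int
init_port_with_offloads(uint16_t port, struct rte_mempool *pool)
{
	struct rte_eth_conf conf = {
		.txmode = {
			.mq_mode = ETH_MQ_TX_NONE,
			/* Offloads the application requires up front. */
			.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
				    DEV_TX_OFFLOAD_TCP_TSO,
		},
	};
	struct rte_eth_dev_info info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	uint16_t nb_rxd = 1024, nb_txd = 1024;
	int ret;

	/* rte_eth_dev_info_get() returns a status code in this era,
	 * which is why the patch starts checking it. */
	ret = rte_eth_dev_info_get(port, &info);
	if (ret != 0)
		return ret;

	/* Enable optional offloads only if the device reports them. */
	if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	ret = rte_eth_dev_configure(port, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* Let the driver clamp descriptor counts to its limits. */
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	/* Per-queue offloads must echo the negotiated port-level set. */
	rxq_conf = info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
			rte_eth_dev_socket_id(port), &rxq_conf, pool);
	if (ret < 0)
		return ret;

	txq_conf = info.default_txconf;
	txq_conf.offloads = conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
			rte_eth_dev_socket_id(port), &txq_conf);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port);
}

vxlan_port_init() in the patched file follows exactly this order, except that it configures nb_devices Rx queues and one Tx queue per lcore, and additionally registers the VXLAN UDP port via rte_eth_dev_udp_tunnel_port_add().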