-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <getopt.h>
#include <rte_tcp.h>
#include "main.h"
-#include "rte_virtio_net.h"
+#include "rte_vhost.h"
#include "vxlan.h"
#include "vxlan_setup.h"
#define UDP_HEADER_LEN 8
#define VXLAN_HEADER_LEN 8
-#define IP_VERSION 0x40
-#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
#define IP_DEFTTL 64 /* from RFC 1340. */
-#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
#define IP_DN_FRAGMENT_FLAG 0x0040
/* VXLAN device */
struct vxlan_conf vxdev;
-struct ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
-struct ether_hdr app_l2_hdr[VXLAN_N_PORTS];
+struct rte_ipv4_hdr app_ip_hdr[VXLAN_N_PORTS];
+struct rte_ether_hdr app_l2_hdr[VXLAN_N_PORTS];
/* local VTEP IP address */
uint8_t vxlan_multicast_ips[2][4] = { {239, 1, 1, 1 }, {239, 1, 2, 1 } };
RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,};
/* Options for configuring ethernet port */
-static const struct rte_eth_conf port_conf = {
+static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
+ .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO),
},
};
* coming from the mbuf_pool passed as parameter
*/
int
-vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
int retval;
uint16_t q;
struct rte_eth_dev_info dev_info;
uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
- const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
- const uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+ uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
struct rte_eth_udp_tunnel tunnel_udp;
struct rte_eth_rxconf *rxconf;
struct rte_eth_txconf *txconf;
struct vxlan_conf *pconf = &vxdev;
+ struct rte_eth_conf local_port_conf = port_conf;
pconf->dst_port = udp_port;
- rte_eth_dev_info_get(port, &dev_info);
+ retval = rte_eth_dev_info_get(port, &dev_info);
+ if (retval != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ port, strerror(-retval));
if (dev_info.max_rx_queues > MAX_QUEUES) {
rte_exit(EXIT_FAILURE,
rxconf = &dev_info.default_rxconf;
txconf = &dev_info.default_txconf;
- txconf->txq_flags = 0;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
rx_rings = nb_devices;
-
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
/* Configure ethernet device. */
- retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+ retval = rte_eth_dev_configure(port, rx_rings, tx_rings,
+ &local_port_conf);
+ if (retval != 0)
+ return retval;
+
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+ &tx_ring_size);
if (retval != 0)
return retval;
/* Setup the queues. */
+ rxconf->offloads = local_port_conf.rxmode.offloads;
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
rte_eth_dev_socket_id(port),
if (retval < 0)
return retval;
}
+ txconf->offloads = local_port_conf.txmode.offloads;
for (q = 0; q < tx_rings; q++) {
retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
rte_eth_dev_socket_id(port),
/* Configure UDP port for UDP tunneling */
tunnel_udp.udp_port = udp_port;
tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
- retval = rte_eth_dev_udp_tunnel_add(port, &tunnel_udp);
+ retval = rte_eth_dev_udp_tunnel_port_add(port, &tunnel_udp);
+ if (retval < 0)
+ return retval;
+ retval = rte_eth_macaddr_get(port, &ports_eth_addr[port]);
if (retval < 0)
return retval;
- rte_eth_macaddr_get(port, &ports_eth_addr[port]);
+
RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port,
+ port,
ports_eth_addr[port].addr_bytes[0],
ports_eth_addr[port].addr_bytes[1],
ports_eth_addr[port].addr_bytes[2],
ports_eth_addr[port].addr_bytes[5]);
if (tso_segsz != 0) {
- struct rte_eth_dev_info dev_info;
- rte_eth_dev_info_get(port, &dev_info);
if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0)
RTE_LOG(WARNING, PORT,
"hardware TSO offload is not supported\n");
static int
vxlan_rx_process(struct rte_mbuf *pkt)
{
- return decapsulation(pkt);
+ int ret = 0;
+
+ if (rx_decap)
+ ret = decapsulation(pkt);
+
+ return ret;
}
static void
vxlan_tx_process(uint8_t queue_id, struct rte_mbuf *pkt)
{
- encapsulation(pkt, queue_id);
+ if (tx_encap)
+ encapsulation(pkt, queue_id);
+
return;
}
vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
{
int i, ret;
- struct ether_hdr *pkt_hdr;
- struct virtio_net *dev = vdev->dev;
- uint64_t portid = dev->device_fh;
- struct ipv4_hdr *ip;
+ struct rte_ether_hdr *pkt_hdr;
+ uint64_t portid = vdev->vid;
+ struct rte_ipv4_hdr *ip;
struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
- if (unlikely(portid > VXLAN_N_PORTS)) {
+ if (unlikely(portid >= VXLAN_N_PORTS)) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") WARNING: Not configuring device,"
+ "(%d) WARNING: Not configuring device, "
"as already have %d ports for VXLAN.",
- dev->device_fh, VXLAN_N_PORTS);
+ vdev->vid, VXLAN_N_PORTS);
return -1;
}
/* Learn MAC address of guest device from packet */
- pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- if (is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) {
+ pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ if (rte_is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") WARNING: This device is using an existing"
+ "(%d) WARNING: This device is using an existing"
" MAC address and has not been registered.\n",
- dev->device_fh);
+ vdev->vid);
return -1;
}
- for (i = 0; i < ETHER_ADDR_LEN; i++) {
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
vdev->mac_address.addr_bytes[i] =
vxdev.port[portid].vport_mac.addr_bytes[i] =
pkt_hdr->s_addr.addr_bytes[i];
memset(&tunnel_filter_conf, 0,
sizeof(struct rte_eth_tunnel_filter_conf));
- tunnel_filter_conf.outer_mac = &ports_eth_addr[0];
+ rte_ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];
/* inner MAC */
- tunnel_filter_conf.inner_mac = &vdev->mac_address;
+ rte_ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);
tunnel_filter_conf.queue_id = vdev->rx_q;
tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
}
vxdev.out_key = tenant_id_conf[vdev->rx_q];
- ether_addr_copy(&vxdev.port[portid].peer_mac,
+ rte_ether_addr_copy(&vxdev.port[portid].peer_mac,
&app_l2_hdr[portid].d_addr);
- ether_addr_copy(&ports_eth_addr[0],
+ rte_ether_addr_copy(&ports_eth_addr[0],
&app_l2_hdr[portid].s_addr);
- app_l2_hdr[portid].ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ app_l2_hdr[portid].ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
ip = &app_ip_hdr[portid];
- ip->version_ihl = IP_VHL_DEF;
+ ip->version_ihl = RTE_IPV4_VHL_DEF;
ip->type_of_service = 0;
ip->total_length = 0;
ip->packet_id = 0;
memset(&tunnel_filter_conf, 0,
sizeof(struct rte_eth_tunnel_filter_conf));
- tunnel_filter_conf.outer_mac = &ports_eth_addr[0];
- tunnel_filter_conf.inner_mac = &vdev->mac_address;
+ rte_ether_addr_copy(&ports_eth_addr[0],
+ &tunnel_filter_conf.outer_mac);
+ rte_ether_addr_copy(&vdev->mac_address,
+ &tunnel_filter_conf.inner_mac);
tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];
vdev->rx_q);
return;
}
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
vdev->mac_address.addr_bytes[i] = 0;
/* Clear out the receive buffers */
/* Transmit packets after encapsulating */
int
-vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
+vxlan_tx_pkts(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts) {
int ret = 0;
uint16_t i;
/* Check for decapsulation and pass packets directly to VIRTIO device */
int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
- uint32_t rx_count)
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts_burst, uint32_t rx_count)
{
uint32_t i = 0;
uint32_t count = 0;
struct rte_mbuf *pkts_valid[rx_count];
for (i = 0; i < rx_count; i++) {
+ if (enable_stats) {
+ rte_atomic64_add(
+ &dev_statistics[vid].rx_bad_ip_csum,
+ (pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
+ != 0);
+ rte_atomic64_add(
+ &dev_statistics[vid].rx_bad_l4_csum,
+ (pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
+ != 0);
+ }
ret = vxlan_rx_process(pkts_burst[i]);
if (unlikely(ret < 0))
continue;
count++;
}
- ret = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_valid, count);
+ ret = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts_valid, count);
return ret;
}