#include <rte_tcp.h>
#include "main.h"
-#include "rte_virtio_net.h"
+#include "rte_vhost.h"
#include "vxlan.h"
#include "vxlan_setup.h"
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
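+ /* Let the NIC strip the Ethernet FCS; with the flag set, reported
+  * packet lengths exclude the 4-byte CRC. */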
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
uint16_t q;
struct rte_eth_dev_info dev_info;
uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
- const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
- const uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
+ uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
struct rte_eth_udp_tunnel tunnel_udp;
struct rte_eth_rxconf *rxconf;
struct rte_eth_txconf *txconf;
if (retval != 0)
return retval;
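+ /* Clamp the requested descriptor counts to what the device supports;
+  * the adjusted values are written back through the pointers, which is
+  * why rx_ring_size/tx_ring_size can no longer be const. */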
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
+ &tx_ring_size);
+ if (retval != 0)
+ return retval;
+
/* Setup the queues. */
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
/* Configure UDP port for UDP tunneling */
tunnel_udp.udp_port = udp_port;
tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
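+ /* rte_eth_dev_udp_tunnel_add() was renamed to
+  * rte_eth_dev_udp_tunnel_port_add(); it registers udp_port with the NIC
+  * as a VXLAN port for tunnel offloads. */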
- retval = rte_eth_dev_udp_tunnel_add(port, &tunnel_udp);
+ retval = rte_eth_dev_udp_tunnel_port_add(port, &tunnel_udp);
if (retval < 0)
return retval;
rte_eth_macaddr_get(port, &ports_eth_addr[port]);
static int
vxlan_rx_process(struct rte_mbuf *pkt)
{
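+ /* Decapsulate only when decapsulation is enabled (rx_decap is the
+  * example's run-time toggle); otherwise pass the packet up unchanged. */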
- return decapsulation(pkt);
+ int ret = 0;
+
+ if (rx_decap)
+ ret = decapsulation(pkt);
+
+ return ret;
}
static void
vxlan_tx_process(uint8_t queue_id, struct rte_mbuf *pkt)
{
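+ /* Likewise, only encapsulate when tx_encap is enabled. */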
- encapsulation(pkt, queue_id);
+ if (tx_encap)
+ encapsulation(pkt, queue_id);
+
return;
}
int
vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
{
int i, ret;
struct ether_hdr *pkt_hdr;
- struct virtio_net *dev = vdev->dev;
- uint64_t portid = dev->device_fh;
+ uint64_t portid = vdev->vid;
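+ /* The vid handed out by the vhost library replaces the old
+  * dev->device_fh and doubles as the VXLAN port index here. */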
struct ipv4_hdr *ip;
struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
- if (unlikely(portid > VXLAN_N_PORTS)) {
+ if (unlikely(portid >= VXLAN_N_PORTS)) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") WARNING: Not configuring device,"
+ "(%d) WARNING: Not configuring device,"
"as already have %d ports for VXLAN.",
- dev->device_fh, VXLAN_N_PORTS);
+ vdev->vid, VXLAN_N_PORTS);
return -1;
}
pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
if (is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) {
RTE_LOG(INFO, VHOST_DATA,
- "(%"PRIu64") WARNING: This device is using an existing"
+ "(%d) WARNING: This device is using an existing"
" MAC address and has not been registered.\n",
- dev->device_fh);
+ vdev->vid);
return -1;
}
memset(&tunnel_filter_conf, 0,
sizeof(struct rte_eth_tunnel_filter_conf));
- tunnel_filter_conf.outer_mac = &ports_eth_addr[0];
+ ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
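+ /* outer_mac (and inner_mac below) are now embedded struct ether_addr
+  * fields rather than pointers, hence the copies. */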
tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];
/* inner MAC */
- tunnel_filter_conf.inner_mac = &vdev->mac_address;
+ ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);
tunnel_filter_conf.queue_id = vdev->rx_q;
tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
memset(&tunnel_filter_conf, 0,
sizeof(struct rte_eth_tunnel_filter_conf));
- tunnel_filter_conf.outer_mac = &ports_eth_addr[0];
- tunnel_filter_conf.inner_mac = &vdev->mac_address;
+ ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
+ ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);
tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];
/* Check for decapsulation and pass packets directly to VIRTIO device */
int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
- uint32_t rx_count)
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts_burst, uint32_t rx_count)
{
uint32_t i = 0;
uint32_t count = 0;
for (i = 0; i < rx_count; i++) {
if (enable_stats) {
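+ /* Per-device statistics are indexed by vid now that device_fh is gone. */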
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_bad_ip_csum,
+ &dev_statistics[vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
!= 0);
rte_atomic64_add(
- &dev_statistics[dev->device_fh].rx_bad_ip_csum,
+ &dev_statistics[vid].rx_bad_ip_csum,
(pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
!= 0);
}
count++;
}
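+ /* rte_vhost_enqueue_burst() likewise takes the vid directly. */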
- ret = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_valid, count);
+ ret = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts_valid, count);
return ret;
}