examples/tep_termination: adapt vxlan_setup.c to the vid-based vhost API (replace struct virtio_net with int vid), rename rte_eth_dev_udp_tunnel_add to rte_eth_dev_udp_tunnel_port_add, copy tunnel-filter MACs by value, and warn when hardware TSO is unsupported
[dpdk.git] / examples / tep_termination / vxlan_setup.c
index 942fc06..8f1f15b 100644 (file)
@@ -191,7 +191,7 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
        /* Configure UDP port for UDP tunneling */
        tunnel_udp.udp_port = udp_port;
        tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
-       retval = rte_eth_dev_udp_tunnel_add(port, &tunnel_udp);
+       retval = rte_eth_dev_udp_tunnel_port_add(port, &tunnel_udp);
        if (retval < 0)
                return retval;
        rte_eth_macaddr_get(port, &ports_eth_addr[port]);
@@ -205,19 +205,33 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
                        ports_eth_addr[port].addr_bytes[4],
                        ports_eth_addr[port].addr_bytes[5]);
 
+       if (tso_segsz != 0) {
+               struct rte_eth_dev_info dev_info;
+               rte_eth_dev_info_get(port, &dev_info);
+               if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0)
+                       RTE_LOG(WARNING, PORT,
+                               "hardware TSO offload is not supported\n");
+       }
        return 0;
 }
 
 static int
 vxlan_rx_process(struct rte_mbuf *pkt)
 {
-       return decapsulation(pkt);
+       int ret = 0;
+
+       if (rx_decap)
+               ret = decapsulation(pkt);
+
+       return ret;
 }
 
 static void
 vxlan_tx_process(uint8_t queue_id, struct rte_mbuf *pkt)
 {
-       encapsulation(pkt, queue_id);
+       if (tx_encap)
+               encapsulation(pkt, queue_id);
+
        return;
 }
 
@@ -230,17 +244,16 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
 {
        int i, ret;
        struct ether_hdr *pkt_hdr;
-       struct virtio_net *dev = vdev->dev;
-       uint64_t portid = dev->device_fh;
+       uint64_t portid = vdev->vid;
        struct ipv4_hdr *ip;
 
        struct rte_eth_tunnel_filter_conf tunnel_filter_conf;
 
-       if (unlikely(portid > VXLAN_N_PORTS)) {
+       if (unlikely(portid >= VXLAN_N_PORTS)) {
                RTE_LOG(INFO, VHOST_DATA,
-                       "(%"PRIu64") WARNING: Not configuring device,"
+                       "(%d) WARNING: Not configuring device,"
                        "as already have %d ports for VXLAN.",
-                       dev->device_fh, VXLAN_N_PORTS);
+                       vdev->vid, VXLAN_N_PORTS);
                return -1;
        }
 
@@ -248,9 +261,9 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
        pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
        if (is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) {
                RTE_LOG(INFO, VHOST_DATA,
-                       "(%"PRIu64") WARNING: This device is using an existing"
+                       "(%d) WARNING: This device is using an existing"
                        " MAC address and has not been registered.\n",
-                       dev->device_fh);
+                       vdev->vid);
                return -1;
        }
 
@@ -264,11 +277,11 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
        memset(&tunnel_filter_conf, 0,
                sizeof(struct rte_eth_tunnel_filter_conf));
 
-       tunnel_filter_conf.outer_mac = &ports_eth_addr[0];
+       ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
        tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];
 
        /* inner MAC */
-       tunnel_filter_conf.inner_mac = &vdev->mac_address;
+       ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);
 
        tunnel_filter_conf.queue_id = vdev->rx_q;
        tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
@@ -352,8 +365,8 @@ vxlan_unlink(struct vhost_dev *vdev)
                memset(&tunnel_filter_conf, 0,
                        sizeof(struct rte_eth_tunnel_filter_conf));
 
-               tunnel_filter_conf.outer_mac = &ports_eth_addr[0];
-               tunnel_filter_conf.inner_mac = &vdev->mac_address;
+               ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
+               ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);
                tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
                tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];
 
@@ -411,8 +424,7 @@ vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
 
 /* Check for decapsulation and pass packets directly to VIRTIO device */
 int
-vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
-               uint32_t rx_count)
+vxlan_rx_pkts(int vid, struct rte_mbuf **pkts_burst, uint32_t rx_count)
 {
        uint32_t i = 0;
        uint32_t count = 0;
@@ -420,6 +432,16 @@ vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
        struct rte_mbuf *pkts_valid[rx_count];
 
        for (i = 0; i < rx_count; i++) {
+               if (enable_stats) {
+                       rte_atomic64_add(
+                               &dev_statistics[vid].rx_bad_ip_csum,
+                               (pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
+                               != 0);
+                       rte_atomic64_add(
+                               &dev_statistics[vid].rx_bad_ip_csum,
+                               (pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
+                               != 0);
+               }
                ret = vxlan_rx_process(pkts_burst[i]);
                if (unlikely(ret < 0))
                        continue;
@@ -428,6 +450,6 @@ vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst,
                        count++;
        }
 
-       ret = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_valid, count);
+       ret = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ, pkts_valid, count);
        return ret;
 }