diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index a27bc95..b24fd82 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -369,14 +369,8 @@ port_init(uint16_t port)
 
        RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
        RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
-                       " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
-                       port,
-                       vmdq_ports_eth_addr[port].addr_bytes[0],
-                       vmdq_ports_eth_addr[port].addr_bytes[1],
-                       vmdq_ports_eth_addr[port].addr_bytes[2],
-                       vmdq_ports_eth_addr[port].addr_bytes[3],
-                       vmdq_ports_eth_addr[port].addr_bytes[4],
-                       vmdq_ports_eth_addr[port].addr_bytes[5]);
+               " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
+               port, RTE_ETHER_ADDR_BYTES(&vmdq_ports_eth_addr[port]));
 
        return 0;
 }
@@ -763,7 +757,7 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
        /* Learn MAC address of guest device from packet */
        pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       if (find_vhost_dev(&pkt_hdr->s_addr)) {
+       if (find_vhost_dev(&pkt_hdr->src_addr)) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) device is using a registered MAC!\n",
                        vdev->vid);
@@ -771,7 +765,8 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
        }
 
        for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
-               vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
+               vdev->mac_address.addr_bytes[i] =
+                       pkt_hdr->src_addr.addr_bytes[i];
 
        /* vlan_tag currently uses the device_id. */
        vdev->vlan_tag = vlan_tags[vdev->vid];
@@ -779,10 +774,7 @@ link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
        /* Print out VMDQ registration info. */
        RTE_LOG(INFO, VHOST_DATA,
                "(%d) mac " RTE_ETHER_ADDR_PRT_FMT " and vlan %d registered\n",
-               vdev->vid,
-               vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
-               vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
-               vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
+               vdev->vid, RTE_ETHER_ADDR_BYTES(&vdev->mac_address),
                vdev->vlan_tag);
 
        /* Register the MAC address. */
@@ -891,17 +883,11 @@ drain_vhost(struct vhost_dev *vdev)
        if (builtin_net_driver) {
                ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
        } else if (async_vhost_driver) {
-               uint32_t cpu_cpl_nr = 0;
                uint16_t enqueue_fail = 0;
-               struct rte_mbuf *m_cpu_cpl[nr_xmit];
 
                complete_async_pkts(vdev);
-               ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
-                                       m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
-               __atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
-
-               if (cpu_cpl_nr)
-                       free_pkts(m_cpu_cpl, cpu_cpl_nr);
+               ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit);
+               __atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
 
                enqueue_fail = nr_xmit - ret;
                if (enqueue_fail)
@@ -960,7 +946,7 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
        uint16_t lcore_id = rte_lcore_id();
        pkt_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+       dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
        if (!dst_vdev)
                return -1;
 
@@ -1008,7 +994,7 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
        struct rte_ether_hdr *pkt_hdr =
                rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
+       dst_vdev = find_vhost_dev(&pkt_hdr->dst_addr);
        if (!dst_vdev)
                return 0;
 
@@ -1091,7 +1077,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
 
 
        nh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
-       if (unlikely(rte_is_broadcast_ether_addr(&nh->d_addr))) {
+       if (unlikely(rte_is_broadcast_ether_addr(&nh->dst_addr))) {
                struct vhost_dev *vdev2;
 
                TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
@@ -1222,19 +1208,12 @@ drain_eth_rx(struct vhost_dev *vdev)
                enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
                                                pkts, rx_count);
        } else if (async_vhost_driver) {
-               uint32_t cpu_cpl_nr = 0;
                uint16_t enqueue_fail = 0;
-               struct rte_mbuf *m_cpu_cpl[MAX_PKT_BURST];
 
                complete_async_pkts(vdev);
                enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
-                                       VIRTIO_RXQ, pkts, rx_count,
-                                       m_cpu_cpl, &cpu_cpl_nr);
-               __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
-                                       __ATOMIC_SEQ_CST);
-
-               if (cpu_cpl_nr)
-                       free_pkts(m_cpu_cpl, cpu_cpl_nr);
+                                       VIRTIO_RXQ, pkts, rx_count);
+               __atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
 
                enqueue_fail = rx_count - enqueue_count;
                if (enqueue_fail)
@@ -1495,7 +1474,6 @@ new_device(int vid)
                                ioat_check_completed_copies_cb;
 
                        config.features = RTE_VHOST_ASYNC_INORDER;
-                       config.async_threshold = 256;
 
                        return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
                                config, &channel_ops);