examples/vhost: fix ioat dependency
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 08182ff..8d8c303 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -634,6 +634,12 @@ us_vhost_parse_args(int argc, char **argv)
 
                        if (!strncmp(long_option[option_index].name,
                                                "dma-type", MAX_LONG_OPT_SZ)) {
+                               if (strlen(optarg) >= MAX_LONG_OPT_SZ) {
+                                       RTE_LOG(INFO, VHOST_CONFIG,
+                                               "Wrong DMA type\n");
+                                       us_vhost_usage(prgname);
+                                       return -1;
+                               }
                                strcpy(dma_type, optarg);
                        }
 
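The added length check keeps optarg from overflowing the fixed-size dma_type[MAX_LONG_OPT_SZ] buffer before the strcpy(). An equivalent, more compact variant (a sketch only, assuming the DPDK string helpers from rte_string_fns.h) does the bounded copy and the truncation check in one call:

        #include <rte_string_fns.h>     /* rte_strlcpy() */

        /* Sketch: rte_strlcpy() returns strlen(optarg), so a return value of
         * MAX_LONG_OPT_SZ or more means the name was truncated and can be
         * rejected the same way as in the hunk above.
         */
        if (rte_strlcpy(dma_type, optarg, MAX_LONG_OPT_SZ) >= MAX_LONG_OPT_SZ) {
                RTE_LOG(INFO, VHOST_CONFIG, "Wrong DMA type\n");
                us_vhost_usage(prgname);
                return -1;
        }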
@@ -803,9 +809,22 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
            struct rte_mbuf *m)
 {
        uint16_t ret;
+       struct rte_mbuf *m_cpl[1];
 
        if (builtin_net_driver) {
                ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
+       } else if (async_vhost_driver) {
+               ret = rte_vhost_submit_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ,
+                                               &m, 1);
+
+               if (likely(ret))
+                       dst_vdev->nr_async_pkts++;
+
+               while (likely(dst_vdev->nr_async_pkts)) {
+                       if (rte_vhost_poll_enqueue_completed(dst_vdev->vid,
+                                       VIRTIO_RXQ, m_cpl, 1))
+                               dst_vdev->nr_async_pkts--;
+               }
        } else {
                ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
        }
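For VM-to-VM forwarding, the async branch submits the single mbuf to the DMA-accelerated enqueue and then busy-polls the completion ring until the copy has finished, so virtio_xmit() keeps its synchronous semantics toward its caller. A self-contained sketch of that submit-then-poll pattern, assuming the experimental async API from rte_vhost_async.h (the helper name is illustrative):

        #include <rte_vhost_async.h>

        /* Sketch: enqueue one packet on the async channel of VIRTIO_RXQ and
         * spin until the copy engine reports it complete.
         */
        static uint16_t
        async_xmit_one(int vid, struct rte_mbuf *m)
        {
                struct rte_mbuf *m_cpl[1];
                uint16_t n_submitted, n_inflight;

                n_submitted = rte_vhost_submit_enqueue_burst(vid, VIRTIO_RXQ,
                                &m, 1);
                n_inflight = n_submitted;

                /* Busy-wait; each successful poll returns one completed mbuf. */
                while (n_inflight)
                        n_inflight -= rte_vhost_poll_enqueue_completed(vid,
                                        VIRTIO_RXQ, m_cpl, 1);

                return n_submitted;
        }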
@@ -1054,6 +1073,19 @@ drain_mbuf_table(struct mbuf_table *tx_q)
        }
 }
 
+static __rte_always_inline void
+complete_async_pkts(struct vhost_dev *vdev, uint16_t qid)
+{
+       struct rte_mbuf *p_cpl[MAX_PKT_BURST];
+       uint16_t complete_count;
+
+       complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
+                                               qid, p_cpl, MAX_PKT_BURST);
+       vdev->nr_async_pkts -= complete_count;
+       if (complete_count)
+               free_pkts(p_cpl, complete_count);
+}
+
 static __rte_always_inline void
 drain_eth_rx(struct vhost_dev *vdev)
 {
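Note the ownership rule behind this helper: with the async data path, rte_vhost_submit_enqueue_burst() only hands the payload to the copy engine, and the mbufs still belong to the application until rte_vhost_poll_enqueue_completed() reports them done. complete_async_pkts() therefore frees packets only as completions arrive, and drain_eth_rx() below skips the unconditional free_pkts() used on the synchronous path.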
@@ -1062,6 +1094,10 @@ drain_eth_rx(struct vhost_dev *vdev)
 
        rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
                                    pkts, MAX_PKT_BURST);
+
+       while (likely(vdev->nr_async_pkts))
+               complete_async_pkts(vdev, VIRTIO_RXQ);
+
        if (!rx_count)
                return;
 
@@ -1086,16 +1122,22 @@ drain_eth_rx(struct vhost_dev *vdev)
        if (builtin_net_driver) {
                enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
                                                pkts, rx_count);
+       } else if (async_vhost_driver) {
+               enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
+                                       VIRTIO_RXQ, pkts, rx_count);
+               vdev->nr_async_pkts += enqueue_count;
        } else {
                enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
                                                pkts, rx_count);
        }
+
        if (enable_stats) {
                rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
                rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
        }
 
-       free_pkts(pkts, rx_count);
+       if (!async_vhost_driver)
+               free_pkts(pkts, rx_count);
 }
 
 static __rte_always_inline void
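A caveat with this hunk: rte_vhost_submit_enqueue_burst() may accept fewer than rx_count packets when the virtqueue is full, and since the unconditional free_pkts() is skipped in async mode, the rejected tail of pkts[] is neither enqueued nor released. A defensive follow-up (a sketch, not part of the patch) would avoid leaking those mbufs under RX overload:

        /* Sketch: release the packets the async enqueue did not accept. */
        if (async_vhost_driver && enqueue_count < rx_count)
                free_pkts(&pkts[enqueue_count], rx_count - enqueue_count);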
@@ -1242,6 +1284,9 @@ destroy_device(int vid)
                "(%d) device has been removed from data core\n",
                vdev->vid);
 
+       if (async_vhost_driver)
+               rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
+
        rte_free(vdev);
 }
 
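Unregistering while copies are still in flight is at best wasteful and may be refused by the library, so a cautious teardown drains completions first. A sketch building on nr_async_pkts and complete_async_pkts() from the hunks above (not part of the patch):

        if (async_vhost_driver) {
                /* Drain and free any in-flight async packets before tearing
                 * the channel down, so the unregister call does not race
                 * with the copy engine.
                 */
                while (vdev->nr_async_pkts)
                        complete_async_pkts(vdev, VIRTIO_RXQ);
                rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
        }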
@@ -1255,7 +1300,6 @@ new_device(int vid)
        int lcore, core_add = 0;
        uint32_t device_num_min = num_devices;
        struct vhost_dev *vdev;
-
        vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
        if (vdev == NULL) {
                RTE_LOG(INFO, VHOST_DATA,
@@ -1296,6 +1340,20 @@ new_device(int vid)
                "(%d) device has been added to data core %d\n",
                vid, vdev->coreid);
 
+       if (async_vhost_driver) {
+               struct rte_vhost_async_features f;
+               struct rte_vhost_async_channel_ops channel_ops;
+               if (strncmp(dma_type, "ioat", 4) == 0) {
+                       channel_ops.transfer_data = ioat_transfer_data_cb;
+                       channel_ops.check_completed_copies =
+                               ioat_check_completed_copies_cb;
+                       f.async_inorder = 1;
+                       f.async_threshold = 256;
+                       return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
+                               f.intval, &channel_ops);
+               }
+       }
+
        return 0;
 }
 
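The async channel is only wired up for the ioat DMA type; the two ioat callbacks are implemented alongside this example. One detail worth noting: rte_vhost_async_features is, in this experimental API, a bit-field view over the 32-bit intval that is passed to the register call, and the hunk sets only the two named fields, so the reserved bits keep whatever happened to be on the stack. A compact, defensive variant of the same registration (a sketch, not part of the patch) zero-initialises the union first:

        if (async_vhost_driver && strncmp(dma_type, "ioat", 4) == 0) {
                /* Zero the whole union so the reserved bits of f.intval are
                 * well defined before it is handed to the library.
                 */
                struct rte_vhost_async_features f = {0};
                struct rte_vhost_async_channel_ops channel_ops = {
                        .transfer_data = ioat_transfer_data_cb,
                        .check_completed_copies = ioat_check_completed_copies_cb,
                };

                f.async_inorder = 1;      /* report completions in order */
                f.async_threshold = 256;  /* small packets stay on the CPU copy path */

                return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
                                f.intval, &channel_ops);
        }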
@@ -1534,6 +1592,9 @@ main(int argc, char *argv[])
        /* Register vhost user driver to handle vhost messages. */
        for (i = 0; i < nb_sockets; i++) {
                char *file = socket_files + i * PATH_MAX;
+               if (async_vhost_driver)
+                       flags = flags | RTE_VHOST_USER_ASYNC_COPY;
+
                ret = rte_vhost_driver_register(file, flags);
                if (ret != 0) {
                        unregister_drivers(i);
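Finally, the async-copy flag has to be OR-ed into flags before rte_vhost_driver_register() is called for each socket; doing it inside the per-socket loop is harmless since the OR is idempotent, but it could equally be hoisted above the loop. For completeness, a sketch of the per-socket registration sequence as this example uses it (flags starts from zero here for brevity, and virtio_net_device_ops is the callback struct defined elsewhere in main.c):

        uint64_t flags = 0;

        if (async_vhost_driver)
                flags |= RTE_VHOST_USER_ASYNC_COPY;

        if (rte_vhost_driver_register(file, flags) != 0)
                rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");

        if (rte_vhost_driver_callback_register(file,
                        &virtio_net_device_ops) != 0)
                rte_exit(EXIT_FAILURE, "failed to register vhost driver callbacks.\n");

        if (rte_vhost_driver_start(file) < 0)
                rte_exit(EXIT_FAILURE, "failed to start vhost driver.\n");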