X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fvhost%2Fmain.c;h=22309977cebd6dc89eaef70f08961dd31fd3fef3;hb=ecf1474a618ba77ef3c4471fdc004a8c5f3b4d11;hp=59a1aff07c5a952572685361b68f355243247648;hpb=abec60e7115dc3197ffc94a1eee9353b9b808600;p=dpdk.git

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 59a1aff07c..22309977ce 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -634,6 +634,12 @@ us_vhost_parse_args(int argc, char **argv)
 
 		if (!strncmp(long_option[option_index].name,
 				"dma-type", MAX_LONG_OPT_SZ)) {
+			if (strlen(optarg) >= MAX_LONG_OPT_SZ) {
+				RTE_LOG(INFO, VHOST_CONFIG,
+					"Wrong DMA type\n");
+				us_vhost_usage(prgname);
+				return -1;
+			}
 			strcpy(dma_type, optarg);
 		}
 
@@ -803,13 +809,16 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
 	    struct rte_mbuf *m)
 {
 	uint16_t ret;
-	struct rte_mbuf *m_cpl[1];
+	struct rte_mbuf *m_cpl[1], *comp_pkt;
+	uint32_t nr_comp = 0;
 
 	if (builtin_net_driver) {
 		ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
 	} else if (async_vhost_driver) {
 		ret = rte_vhost_submit_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ,
-						&m, 1);
+						&m, 1, &comp_pkt, &nr_comp);
+		if (nr_comp == 1)
+			goto done;
 
 		if (likely(ret))
 			dst_vdev->nr_async_pkts++;
@@ -823,6 +832,7 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
 		ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
 	}
 
+done:
 	if (enable_stats) {
 		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
 		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
@@ -1084,7 +1094,8 @@ static __rte_always_inline void
 drain_eth_rx(struct vhost_dev *vdev)
 {
 	uint16_t rx_count, enqueue_count;
-	struct rte_mbuf *pkts[MAX_PKT_BURST];
+	struct rte_mbuf *pkts[MAX_PKT_BURST], *comp_pkts[MAX_PKT_BURST];
+	uint32_t nr_comp = 0;
 
 	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
 				    pkts, MAX_PKT_BURST);
@@ -1118,7 +1129,12 @@ drain_eth_rx(struct vhost_dev *vdev)
 						pkts, rx_count);
 	} else if (async_vhost_driver) {
 		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
-					VIRTIO_RXQ, pkts, rx_count);
+					VIRTIO_RXQ, pkts, rx_count, comp_pkts,
+					&nr_comp);
+		if (nr_comp > 0) {
+			free_pkts(comp_pkts, nr_comp);
+			enqueue_count -= nr_comp;
+		}
 		vdev->nr_async_pkts += enqueue_count;
 	} else {
 		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
@@ -1294,13 +1310,6 @@ new_device(int vid)
 	int lcore, core_add = 0;
 	uint32_t device_num_min = num_devices;
 	struct vhost_dev *vdev;
-
-	struct rte_vhost_async_channel_ops channel_ops = {
-		.transfer_data = ioat_transfer_data_cb,
-		.check_completed_copies = ioat_check_completed_copies_cb
-	};
-	struct rte_vhost_async_features f;
-
 	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
 	if (vdev == NULL) {
 		RTE_LOG(INFO, VHOST_DATA,
@@ -1342,10 +1351,17 @@ new_device(int vid)
 		vid, vdev->coreid);
 
 	if (async_vhost_driver) {
-		f.async_inorder = 1;
-		f.async_threshold = 256;
-		return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
-			f.intval, &channel_ops);
+		struct rte_vhost_async_features f;
+		struct rte_vhost_async_channel_ops channel_ops;
+		if (strncmp(dma_type, "ioat", 4) == 0) {
+			channel_ops.transfer_data = ioat_transfer_data_cb;
+			channel_ops.check_completed_copies =
+				ioat_check_completed_copies_cb;
+			f.async_inorder = 1;
+			f.async_threshold = 256;
+			return rte_vhost_async_channel_register(vid, VIRTIO_RXQ,
+				f.intval, &channel_ops);
+		}
 	}
 
 	return 0;
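
Note (not part of the patch): the pattern the change introduces in drain_eth_rx() is to submit a burst to the async channel and, when the call hands back packets it already completed in place (comp_pkts/nr_comp), free them immediately and exclude them from the in-flight counter. The sketch below restates that pattern as a standalone helper under the six-argument rte_vhost_submit_enqueue_burst() prototype used by this series; the helper name async_enqueue_burst, the BURST_SZ limit, and the nr_in_flight counter are illustrative only, and rte_pktmbuf_free() stands in for the example's local free_pkts() helper.

/* Illustrative sketch, not part of the patch: enqueue a burst through the
 * async vhost channel and account for packets reported as completed in
 * place.  Assumes the 6-argument rte_vhost_submit_enqueue_burst() prototype
 * introduced alongside this change (rte_vhost_async.h, experimental API). */
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_vhost_async.h>

#define BURST_SZ 32	/* illustrative burst limit */

static uint16_t
async_enqueue_burst(int vid, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t count,
		uint64_t *nr_in_flight)
{
	struct rte_mbuf *comp_pkts[BURST_SZ];
	uint32_t nr_comp = 0;
	uint16_t enqueued;
	uint32_t i;

	if (count > BURST_SZ)
		count = BURST_SZ;

	/* Submit the burst; packets the call completed synchronously are
	 * returned in comp_pkts, the rest stay in flight on the channel. */
	enqueued = rte_vhost_submit_enqueue_burst(vid, queue_id,
			pkts, count, comp_pkts, &nr_comp);

	/* Completed-in-place packets are done: free them and do not count
	 * them against the outstanding async copies. */
	for (i = 0; i < nr_comp; i++)
		rte_pktmbuf_free(comp_pkts[i]);
	enqueued -= nr_comp;

	*nr_in_flight += enqueued;
	return enqueued;
}

In drain_eth_rx() the patch does the same bookkeeping inline, subtracting nr_comp from enqueue_count before adding it to vdev->nr_async_pkts, so only copies still owned by the DMA engine are tracked as pending.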