examples/vhost_blk: replace SMP barrier with thread fence
[dpdk.git] / lib / librte_vhost / virtio_net.c
index a05d44f..6c51286 100644 (file)
@@ -1491,8 +1491,9 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
        struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
        struct rte_vhost_iov_iter *src_it = it_pool;
        struct rte_vhost_iov_iter *dst_it = it_pool + 1;
-       uint16_t n_free_slot, slot_idx;
+       uint16_t n_free_slot, slot_idx = 0;
        uint16_t pkt_err = 0;
+       uint16_t segs_await = 0;
        struct async_inflight_info *pkts_info = vq->async_pkts_info;
        int n_pkts = 0;
 
@@ -1541,6 +1542,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
                        dst_iovec += dst_it->nr_segs;
+                       segs_await += src_it->nr_segs;
                        src_it += 2;
                        dst_it += 2;
                } else {
                        pkts_info[slot_idx].info = num_buffers;
                        vq->async_pkts_inflight_n++;
@@ -1548,15 +1550,24 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 
                vq->last_avail_idx += num_buffers;
 
+               /*
+                * conditions to trigger async device transfer:
+                * - buffered packet number reaches transfer threshold
+                * - this is the last packet in the burst enqueue
+                * - unused async iov number is less than max vhost vector
+                */
                if (pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
-                               (pkt_idx == count - 1 && pkt_burst_idx)) {
+                       (pkt_idx == count - 1 && pkt_burst_idx) ||
+                       (VHOST_MAX_ASYNC_VEC / 2 - segs_await <
+                       BUF_VECTOR_MAX)) {
                        n_pkts = vq->async_ops.transfer_data(dev->vid,
                                        queue_id, tdes, 0, pkt_burst_idx);
                        src_iovec = vec_pool;
                        dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
                        src_it = it_pool;
                        dst_it = it_pool + 1;
-                       vq->async_pkts_inflight_n += n_pkts;
+                       segs_await = 0;
+                       vq->async_pkts_inflight_n += pkt_burst_idx;
 
                        if (unlikely(n_pkts < (int)pkt_burst_idx)) {
                                /*
@@ -1576,7 +1587,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
        if (pkt_burst_idx) {
                n_pkts = vq->async_ops.transfer_data(dev->vid,
                                queue_id, tdes, 0, pkt_burst_idx);
-               vq->async_pkts_inflight_n += n_pkts;
+               vq->async_pkts_inflight_n += pkt_burst_idx;
 
                if (unlikely(n_pkts < (int)pkt_burst_idx))
                        pkt_err = pkt_burst_idx - n_pkts;
@@ -1703,7 +1714,6 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 {
        struct vhost_virtqueue *vq;
        uint32_t nb_tx = 0;
-       bool drawback = false;
 
        VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
@@ -1716,13 +1726,8 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 
        rte_spinlock_lock(&vq->access_lock);
 
-       if (unlikely(vq->enabled == 0))
-               goto out_access_unlock;
-
-       if (unlikely(!vq->async_registered)) {
-               drawback = true;
+       if (unlikely(vq->enabled == 0 || !vq->async_registered))
                goto out_access_unlock;
-       }
 
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_rd_lock(vq);
@@ -1749,9 +1754,6 @@ out:
 out_access_unlock:
        rte_spinlock_unlock(&vq->access_lock);
 
-       if (drawback)
-               return rte_vhost_enqueue_burst(dev->vid, queue_id, pkts, count);
-
        return nb_tx;
 }