vhost: flush batched enqueue descs directly
Author:     Marvin Liu <yong.liu@intel.com>
AuthorDate: Thu, 24 Oct 2019 16:08:27 +0000 (00:08 +0800)
Commit:     Ferruh Yigit <ferruh.yigit@intel.com>
CommitDate: Fri, 25 Oct 2019 17:20:47 +0000 (19:20 +0200)
Flush the used descriptors as soon as the batched enqueue function
finishes. The descriptors' flags are pre-calculated once per batch
from the used wrap counter, as vhost resets every flags field in the
batch anyway.
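
For context, the packed ring marks a descriptor as used when its AVAIL
and USED bits both match the reader's used wrap counter, which is what
allows one flags value to be computed per batch. A minimal sketch of the
driver-side check (illustrative only, not code added by this patch; the
bit positions are the standard ones from the virtio 1.1 spec):

    #include <stdbool.h>
    #include <stdint.h>

    #define VRING_DESC_F_AVAIL (1u << 7)
    #define VRING_DESC_F_USED  (1u << 15)

    struct vring_packed_desc {
            uint64_t addr;
            uint32_t len;
            uint16_t id;
            uint16_t flags;
    };

    /* A descriptor is "used" once AVAIL == USED == the reader's used
     * wrap counter; the device can therefore store one pre-calculated
     * flags value per batch, derived from its own wrap counter. */
    static inline bool
    desc_is_used(const struct vring_packed_desc *desc, bool wrap_counter)
    {
            uint16_t flags = desc->flags; /* paired with a read barrier
                                           * in real code */
            bool avail = !!(flags & VRING_DESC_F_AVAIL);
            bool used = !!(flags & VRING_DESC_F_USED);

            return avail == used && used == wrap_counter;
    }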

Signed-off-by: Marvin Liu <yong.liu@intel.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
lib/librte_vhost/vhost.h
lib/librte_vhost/virtio_net.c

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index d594464..f8dbe84 100644
@@ -39,6 +39,9 @@
 
 #define VHOST_LOG_CACHE_NR 32
 
+#define PACKED_DESC_ENQUEUE_USED_FLAG(w)       \
+       ((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
+               VRING_DESC_F_WRITE)
 #define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
                                         VRING_DESC_F_INDIRECT)
 
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 25bffdd..51ce320 100644
@@ -154,6 +154,36 @@ vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
        vhost_log_cache_sync(dev, vq);
 }
 
+static __rte_always_inline void
+vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
+                                struct vhost_virtqueue *vq,
+                                uint64_t *lens,
+                                uint16_t *ids)
+{
+       uint16_t i;
+       uint16_t flags;
+
+       flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               vq->desc_packed[vq->last_used_idx + i].id = ids[i];
+               vq->desc_packed[vq->last_used_idx + i].len = lens[i];
+       }
+
+       rte_smp_wmb();
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+               vq->desc_packed[vq->last_used_idx + i].flags = flags;
+
+       vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
+                                  sizeof(struct vring_packed_desc),
+                                  sizeof(struct vring_packed_desc) *
+                                  PACKED_BATCH_SIZE);
+       vhost_log_cache_sync(dev, vq);
+
+       vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
+}
+
 static __rte_always_inline void
 flush_shadow_used_ring_packed(struct virtio_net *dev,
                        struct vhost_virtqueue *vq)
@@ -992,6 +1022,7 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
        struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
        uint32_t buf_offset = dev->vhost_hlen;
        uint64_t lens[PACKED_BATCH_SIZE];
+       uint16_t ids[PACKED_BATCH_SIZE];
        uint16_t i;
 
        if (unlikely(avail_idx & PACKED_BATCH_MASK))
@@ -1047,6 +1078,11 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
                           pkts[i]->pkt_len);
        }
 
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+               ids[i] = descs[avail_idx + i].id;
+
+       vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
+
        return 0;
 }
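
A note on ordering: in vhost_flush_enqueue_batch_packed() the id/len
stores must be globally visible before the flags store, because the
driver polls the flags field to detect used descriptors. A condensed
sketch of that writer-side pattern (illustrative; publish_used_desc()
is a hypothetical helper, assuming the vring_packed_desc layout above
and DPDK's rte_smp_wmb() from rte_atomic.h):

    #include <rte_atomic.h>

    static inline void
    publish_used_desc(struct vring_packed_desc *desc,
                      uint16_t id, uint32_t len, uint16_t flags)
    {
            desc->id = id;
            desc->len = len;
            rte_smp_wmb();       /* order id/len before the flags store */
            desc->flags = flags; /* driver polls this field, write last */
    }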