From f41516c309d7ac96827ed5c7801211bcb6680f6d Mon Sep 17 00:00:00 2001
From: Marvin Liu
Date: Fri, 25 Oct 2019 00:08:27 +0800
Subject: [PATCH 1/1] vhost: flush batched enqueue descs directly

Flush used elements directly when the batched enqueue function is
finished. Descriptors' flags are pre-calculated, as they will be reset
by vhost.

Signed-off-by: Marvin Liu
Reviewed-by: Gavin Hu
Reviewed-by: Maxime Coquelin
---
 lib/librte_vhost/vhost.h      |  3 +++
 lib/librte_vhost/virtio_net.c | 36 ++++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index d594464423..f8dbe841c2 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -39,6 +39,9 @@
 
 #define VHOST_LOG_CACHE_NR 32
 
+#define PACKED_DESC_ENQUEUE_USED_FLAG(w)	\
+	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
+		VRING_DESC_F_WRITE)
 #define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
 					 VRING_DESC_F_INDIRECT)
 
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 25bffdd524..51ce320649 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -154,6 +154,36 @@ vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
 	vhost_log_cache_sync(dev, vq);
 }
 
+static __rte_always_inline void
+vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
+				 struct vhost_virtqueue *vq,
+				 uint64_t *lens,
+				 uint16_t *ids)
+{
+	uint16_t i;
+	uint16_t flags;
+
+	flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		vq->desc_packed[vq->last_used_idx + i].id = ids[i];
+		vq->desc_packed[vq->last_used_idx + i].len = lens[i];
+	}
+
+	rte_smp_wmb();
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		vq->desc_packed[vq->last_used_idx + i].flags = flags;
+
+	vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
+				   sizeof(struct vring_packed_desc),
+				   sizeof(struct vring_packed_desc) *
+				   PACKED_BATCH_SIZE);
+	vhost_log_cache_sync(dev, vq);
+
+	vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
+}
+
 static __rte_always_inline void
 flush_shadow_used_ring_packed(struct virtio_net *dev,
 			      struct vhost_virtqueue *vq)
@@ -992,6 +1022,7 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
 	struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
 	uint32_t buf_offset = dev->vhost_hlen;
 	uint64_t lens[PACKED_BATCH_SIZE];
+	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
 	if (unlikely(avail_idx & PACKED_BATCH_MASK))
@@ -1047,6 +1078,11 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
 			   pkts[i]->pkt_len);
 	}
 
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		ids[i] = descs[avail_idx + i].id;
+
+	vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
+
 	return 0;
 }
 
-- 
2.20.1
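
For readers less familiar with the virtio 1.1 packed ring, below is a
minimal standalone sketch of the flag convention that the new
PACKED_DESC_ENQUEUE_USED_FLAG() macro encodes; the constant values
mirror the spec, but the example itself is illustrative only and not
part of the patch. A descriptor is marked "used" by making its AVAIL
and USED bits equal, and both bits must match the ring's current used
wrap counter, which is why the flags can be computed once per batch
and then written in one pass after the write barrier:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Packed-ring descriptor flag bits (virtio 1.1 spec values). */
	#define VRING_DESC_F_WRITE (1 << 1)
	#define VRING_DESC_F_AVAIL (1 << 7)
	#define VRING_DESC_F_USED  (1 << 15)

	/* Compute the flags vhost writes back for a "used" descriptor.
	 * With wrap_counter set, AVAIL and USED are both 1; with it
	 * clear, both are 0.  Either way AVAIL == USED, which the
	 * driver reads as "descriptor used". */
	static uint16_t
	enqueue_used_flags(bool wrap_counter)
	{
		uint16_t flags = VRING_DESC_F_WRITE;

		if (wrap_counter)
			flags |= VRING_DESC_F_AVAIL | VRING_DESC_F_USED;
		return flags;
	}

	int
	main(void)
	{
		printf("wrap=1: 0x%04x\n", enqueue_used_flags(true));
		printf("wrap=0: 0x%04x\n", enqueue_used_flags(false));
		return 0;
	}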