From: Maxime Coquelin
Date: Tue, 23 Oct 2018 10:07:10 +0000 (+0200)
Subject: vhost: avoid memory barriers when no descriptors dequeued
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=e988a6d84599f83321f8036dc45acf26a97ad7ea;p=dpdk.git

vhost: avoid memory barriers when no descriptors dequeued

In both the split and packed dequeue paths, the flush_shadow_used_ring
and vhost_ring_call variants get called even if no packets have been
dequeued, and so no descriptor updates have happened.

This has an impact on the CPU pipeline, as memory barriers are used in
these functions.

This patch skips calling these functions when no descriptors have been
dequeued. With split ring and dequeue zero-copy disabled, the
performance gain should be nil, but it should be noticeable with packed
ring or with dequeue zero-copy enabled.

Fixes: ae999ce49dcb ("vhost: add Tx support for packed ring")
Fixes: 915cf9404225 ("vhost: use shadow used ring in dequeue path")
Cc: stable@dpdk.org

Signed-off-by: Maxime Coquelin
Reviewed-by: Jens Freimann
Tested-by: Jens Freimann
Reviewed-by: Tiwei Bie
---

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 1c31c05625..8ad30c94a6 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1360,8 +1360,10 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			}
 		}
 
-		flush_shadow_used_ring_split(dev, vq);
-		vhost_vring_call_split(dev, vq);
+		if (likely(vq->shadow_used_idx)) {
+			flush_shadow_used_ring_split(dev, vq);
+			vhost_vring_call_split(dev, vq);
+		}
 	}
 
 	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
@@ -1440,8 +1442,10 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		do_data_copy_dequeue(vq);
 		if (unlikely(i < count))
 			vq->shadow_used_idx = i;
-		flush_shadow_used_ring_split(dev, vq);
-		vhost_vring_call_split(dev, vq);
+		if (likely(vq->shadow_used_idx)) {
+			flush_shadow_used_ring_split(dev, vq);
+			vhost_vring_call_split(dev, vq);
+		}
 	}
 
 	return i;
@@ -1476,8 +1480,10 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			}
 		}
 
-		flush_shadow_used_ring_packed(dev, vq);
-		vhost_vring_call_packed(dev, vq);
+		if (likely(vq->shadow_used_idx)) {
+			flush_shadow_used_ring_packed(dev, vq);
+			vhost_vring_call_packed(dev, vq);
+		}
 	}
 
 	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
@@ -1555,8 +1561,10 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		do_data_copy_dequeue(vq);
 		if (unlikely(i < count))
 			vq->shadow_used_idx = i;
-		flush_shadow_used_ring_packed(dev, vq);
-		vhost_vring_call_packed(dev, vq);
+		if (likely(vq->shadow_used_idx)) {
+			flush_shadow_used_ring_packed(dev, vq);
+			vhost_vring_call_packed(dev, vq);
+		}
 	}
 
 	return i;
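
For readers unfamiliar with the shadow used ring, the standalone C sketch below illustrates the pattern the patch applies; it is not DPDK code, and the names (toy_vq, toy_flush_shadow, toy_kick) are made up for illustration. As the commit message notes, flushing the used ring and notifying the guest involve memory barriers, so both steps are skipped entirely when shadow_used_idx is zero.

/*
 * Hypothetical sketch of the guard introduced by this patch.
 * None of these names are DPDK APIs; the fences stand in for the
 * barriers issued by the real flush/notify helpers.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_vq {
	uint16_t shadow_used_idx;   /* pending shadow used entries */
	uint16_t used_idx;          /* index published to the "guest" */
};

/* Stand-in for flush_shadow_used_ring_*(): descriptor contents must be
 * visible before the used index is published, hence the release fence. */
static void toy_flush_shadow(struct toy_vq *vq)
{
	__atomic_thread_fence(__ATOMIC_RELEASE);
	vq->used_idx += vq->shadow_used_idx;
	vq->shadow_used_idx = 0;
}

/* Stand-in for vhost_vring_call_*(): the real helper also reads
 * guest-written flags, so it needs a full barrier before notifying. */
static void toy_kick(struct toy_vq *vq)
{
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	printf("kick guest, used_idx=%u\n", (unsigned)vq->used_idx);
}

static void toy_dequeue_done(struct toy_vq *vq)
{
	/* Same guard as the patch: when nothing was dequeued there is
	 * nothing to publish, so the barrier-heavy calls are skipped. */
	if (vq->shadow_used_idx) {
		toy_flush_shadow(vq);
		toy_kick(vq);
	}
}

int main(void)
{
	struct toy_vq vq = { .shadow_used_idx = 0, .used_idx = 0 };

	toy_dequeue_done(&vq);      /* nothing dequeued: no flush, no kick */

	vq.shadow_used_idx = 4;     /* pretend 4 descriptors were dequeued */
	toy_dequeue_done(&vq);      /* flush + kick happen exactly once */

	return 0;
}

The design point is the same as in the patch: the check on shadow_used_idx is cheap and almost always true in loaded conditions (hence likely() in the real code), while the flush and call paths it guards are the ones carrying the memory barriers.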