From 6094557de086702961800fdb1159670231a9f218 Mon Sep 17 00:00:00 2001
From: Joyce Kong
Date: Tue, 17 Sep 2019 13:28:25 +0800
Subject: [PATCH] net/virtio: improve perf via one-way barrier on avail flag

In case VIRTIO_F_ORDER_PLATFORM(36) is not negotiated, the frontend
and backend are assumed to be implemented in software, that is, they
can run on identical CPUs in an SMP configuration. Thus a weak form
of memory barriers like rte_smp_r/wmb, rather than rte_cio_r/wmb, is
sufficient for this case (vq->hw->weak_barriers == 1) and yields
better performance.

For the above case, this patch helps yield even better performance
by replacing the two-way barriers with C11 one-way barriers for the
avail flags in the packed ring.

Meanwhile, a read barrier is required to ensure ordering between the
descriptor's flags and content reads [1]. With C11, a load-acquire
can enforce this ordering instead of an rmb barrier.

[1] https://patchwork.dpdk.org/patch/49109/

Signed-off-by: Joyce Kong
Reviewed-by: Gavin Hu
Reviewed-by: Phil Yang
Reviewed-by: Maxime Coquelin
---
 drivers/net/virtio/virtio_rxtx.c              | 13 +++++++------
 .../net/virtio/virtio_user/virtio_user_dev.c  |  6 +++++-
 drivers/net/virtio/virtqueue.h                | 21 +++++++++++++++++++++
 lib/librte_vhost/vhost.h                      |  2 +-
 lib/librte_vhost/virtio_net.c                 | 11 +++++------
 5 files changed, 39 insertions(+), 14 deletions(-)

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 929aa4cbd3..bf7bae314b 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -498,8 +498,10 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
 		vq->vq_desc_head_idx = dxp->next;
 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
-		virtio_wmb(hw->weak_barriers);
-		start_dp[idx].flags = flags;
+
+		virtqueue_store_flags_packed(&start_dp[idx], flags,
+					     hw->weak_barriers);
+
 		if (++vq->vq_avail_idx >= vq->vq_nentries) {
 			vq->vq_avail_idx -= vq->vq_nentries;
 			vq->vq_packed.cached_flags ^=
@@ -713,8 +715,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
 	}
 
-	virtio_wmb(vq->hw->weak_barriers);
-	dp->flags = flags;
+	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
 }
 
 static inline void
@@ -808,8 +809,8 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
 	}
 
-	virtio_wmb(vq->hw->weak_barriers);
-	head_dp->flags = head_flags;
+	virtqueue_store_flags_packed(head_dp, head_flags,
+				     vq->hw->weak_barriers);
 }
 
 static inline void
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index fab87eb5b6..7911c398be 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -624,7 +624,7 @@ virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
 static inline int
 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 {
-	uint16_t flags = desc->flags;
+	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
 
 	return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
 		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
@@ -684,6 +684,10 @@ virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
 	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
 	uint16_t n_descs, flags;
 
+	/* Perform a load-acquire barrier in desc_is_avail to
+	 * enforce the ordering between desc flags and desc
+	 * content.
+	 */
 	while (desc_is_avail(&vring->desc[vq->used_idx],
 			     vq->used_wrap_counter)) {
 
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index c6dd4a347b..b728ff86d6 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -54,6 +54,27 @@ virtio_wmb(uint8_t weak_barriers)
 		rte_cio_wmb();
 }
 
+static inline void
+virtqueue_store_flags_packed(struct vring_packed_desc *dp,
+			     uint16_t flags, uint8_t weak_barriers)
+{
+	if (weak_barriers) {
+/* x86 prefers using rte_smp_wmb over __atomic_store_n as it reports
+ * better perf (~1.5%), which comes from the branch saved by the compiler.
+ * The if and else branches are identical, with the smp and cio barriers
+ * both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+		rte_smp_wmb();
+		dp->flags = flags;
+#else
+		__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+#endif
+	} else {
+		rte_cio_wmb();
+		dp->flags = flags;
+	}
+}
 #ifdef RTE_PMD_PACKET_PREFETCH
 #define rte_packet_prefetch(p)  rte_prefetch1(p)
 #else
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 5131a97a39..099a0d3f6b 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -344,7 +344,7 @@ vq_is_packed(struct virtio_net *dev)
 static inline bool
 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 {
-	uint16_t flags = *((volatile uint16_t *) &desc->flags);
+	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
 
 	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
 		wrap_counter != !!(flags & VRING_DESC_F_USED);
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 5b85b832d0..e7463ffba2 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -503,14 +503,13 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	if (avail_idx < vq->last_avail_idx)
 		wrap_counter ^= 1;
 
-	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
-		return -1;
-
 	/*
-	 * The ordering between desc flags and desc
-	 * content reads need to be enforced.
+	 * Perform a load-acquire barrier in desc_is_avail to
+	 * enforce the ordering between desc flags and desc
+	 * content.
 	 */
-	rte_smp_rmb();
+	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
+		return -1;
 
 	*desc_count = 0;
 	*len = 0;
-- 
2.20.1
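
For readers unfamiliar with the one-way barrier pattern the patch
applies, the sketch below is illustrative only and not part of the
patch. It assumes a GCC/Clang toolchain providing the __atomic
builtins (the same ones the patch uses); the toy_desc type, the
TOY_F_AVAIL flag, and the helper names are hypothetical stand-ins for
the vring descriptor and its avail flag. The producer publishes the
flags with a store-release instead of wmb-then-plain-store; the
consumer reads them with a load-acquire instead of plain-load-then-rmb.

	#include <stdint.h>
	#include <stdbool.h>

	#define TOY_F_AVAIL (1 << 7)	/* avail bit, as in packed-ring flags */

	struct toy_desc {
		uint64_t addr;
		uint32_t len;
		uint16_t id;
		uint16_t flags;
	};

	/* Producer side: fill the descriptor body first, then publish the
	 * flags with a store-release so the body writes cannot be reordered
	 * after the flags store (the one-way replacement for wmb + store).
	 */
	static void
	toy_publish(struct toy_desc *d, uint64_t addr, uint32_t len, uint16_t id)
	{
		d->addr = addr;
		d->len = len;
		d->id = id;
		__atomic_store_n(&d->flags, TOY_F_AVAIL, __ATOMIC_RELEASE);
	}

	/* Consumer side: read the flags with a load-acquire; once the
	 * descriptor is seen as available, the body reads below cannot be
	 * reordered before the flags read, so no separate rmb is needed.
	 */
	static bool
	toy_consume(struct toy_desc *d, uint64_t *addr, uint32_t *len)
	{
		uint16_t flags = __atomic_load_n(&d->flags, __ATOMIC_ACQUIRE);

		if (!(flags & TOY_F_AVAIL))
			return false;
		*addr = d->addr;
		*len = d->len;
		return true;
	}

	/* Single-threaded demo; in real use toy_publish and toy_consume
	 * would run on different cores sharing the descriptor.
	 */
	int main(void)
	{
		struct toy_desc d = {0};
		uint64_t addr;
		uint32_t len;

		toy_publish(&d, 0x1000, 64, 0);
		return toy_consume(&d, &addr, &len) ? 0 : 1;
	}

On strongly ordered x86 both orderings are compiler-only barriers,
which is why the patch keeps rte_smp_wmb there; on weakly ordered
CPUs such as Arm, the release/acquire pair emits cheaper one-way
ordering instructions than full two-way barriers.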