net/virtio: improve perf via one-way barrier on avail flag
author    Joyce Kong <joyce.kong@arm.com>
          Tue, 17 Sep 2019 05:28:25 +0000 (13:28 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Wed, 23 Oct 2019 14:43:09 +0000 (16:43 +0200)
If VIRTIO_F_ORDER_PLATFORM(36) is not negotiated, the frontend and
backend are assumed to be implemented in software, that is, they can
run on identical CPUs in an SMP configuration.
In that case (vq->hw->weak_barriers == 1), a weak form of memory
barriers such as rte_smp_r/wmb, rather than rte_cio_r/wmb, is
sufficient and yields better performance.
For that case, this patch improves performance further by replacing
the two-way barriers with C11 one-way barriers for the avail flags
in the packed ring.
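
As a rough sketch of the store-side difference (illustrative only;
the helper this patch actually adds is virtqueue_store_flags_packed
in virtqueue.h below):

    /* Two-way barrier: a full wmb orders the preceding descriptor
     * writes before the flags store, but also constrains the
     * accesses that follow it.
     */
    rte_smp_wmb();
    dp->flags = flags;

    /* One-way barrier: a release store only guarantees that prior
     * writes are visible before the flags store; later accesses
     * may still be moved up across it.
     */
    __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);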

Meanwhile, a read barrier is required to ensure ordering between
reads of a descriptor's flags and of its content [1]. With C11, a
load-acquire can enforce this ordering instead of an rmb barrier.
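
On the read side, the acquire load pairs with the release store
above and subsumes the separate rmb (a sketch mirroring the change
to desc_is_avail in the diff below):

    /* Acquire load: subsequent reads of the descriptor's content
     * cannot be reordered before this load of its flags, so the
     * standalone rte_smp_rmb() after the check is no longer needed.
     */
    uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);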

[1] https://patchwork.dpdk.org/patch/49109/

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
drivers/net/virtio/virtio_rxtx.c
drivers/net/virtio/virtio_user/virtio_user_dev.c
drivers/net/virtio/virtqueue.h
lib/librte_vhost/vhost.h
lib/librte_vhost/virtio_net.c

index 929aa4c..bf7bae3 100644
@@ -498,8 +498,10 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
                vq->vq_desc_head_idx = dxp->next;
                if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
                        vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
-               virtio_wmb(hw->weak_barriers);
-               start_dp[idx].flags = flags;
+
+               virtqueue_store_flags_packed(&start_dp[idx], flags,
+                                            hw->weak_barriers);
+
                if (++vq->vq_avail_idx >= vq->vq_nentries) {
                        vq->vq_avail_idx -= vq->vq_nentries;
                        vq->vq_packed.cached_flags ^=
@@ -713,8 +715,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
                        vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
        }
 
-       virtio_wmb(vq->hw->weak_barriers);
-       dp->flags = flags;
+       virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
 }
 
 static inline void
@@ -808,8 +809,8 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                        vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
        }
 
-       virtio_wmb(vq->hw->weak_barriers);
-       head_dp->flags = head_flags;
+       virtqueue_store_flags_packed(head_dp, head_flags,
+                                    vq->hw->weak_barriers);
 }
 
 static inline void
index fab87eb..7911c39 100644
@@ -624,7 +624,7 @@ virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
 static inline int
 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 {
-       uint16_t flags = desc->flags;
+       uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
 
        return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
                wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
@@ -684,6 +684,10 @@ virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
        struct vring_packed *vring = &dev->packed_vrings[queue_idx];
        uint16_t n_descs, flags;
 
+       /* Perform a load-acquire barrier in desc_is_avail to
+        * enforce the ordering between desc flags and desc
+        * content.
+        */
        while (desc_is_avail(&vring->desc[vq->used_idx],
                             vq->used_wrap_counter)) {
 
index c6dd4a3..b728ff8 100644
@@ -54,6 +54,27 @@ virtio_wmb(uint8_t weak_barriers)
                rte_cio_wmb();
 }
 
+static inline void
+virtqueue_store_flags_packed(struct vring_packed_desc *dp,
+                             uint16_t flags, uint8_t weak_barriers)
+{
+       if (weak_barriers) {
+/* x86 prefers rte_smp_wmb over __atomic_store_n as it reports better
+ * performance (~1.5%), which comes from the branch saved by the
+ * compiler: the if and else branches are identical, as the smp and
+ * cio barriers are both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+               rte_smp_wmb();
+               dp->flags = flags;
+#else
+               __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+#endif
+       } else {
+               rte_cio_wmb();
+               dp->flags = flags;
+       }
+}
 #ifdef RTE_PMD_PACKET_PREFETCH
 #define rte_packet_prefetch(p)  rte_prefetch1(p)
 #else
index 5131a97..099a0d3 100644
@@ -344,7 +344,7 @@ vq_is_packed(struct virtio_net *dev)
 static inline bool
 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 {
-       uint16_t flags = *((volatile uint16_t *) &desc->flags);
+       uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
 
        return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
                wrap_counter != !!(flags & VRING_DESC_F_USED);
index 5b85b83..e7463ff 100644
@@ -503,14 +503,13 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
        if (avail_idx < vq->last_avail_idx)
                wrap_counter ^= 1;
 
-       if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
-               return -1;
-
        /*
-        * The ordering between desc flags and desc
-        * content reads need to be enforced.
+        * Perform a load-acquire barrier in desc_is_avail to
+        * enforce the ordering between desc flags and desc
+        * content.
         */
-       rte_smp_rmb();
+       if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
+               return -1;
 
        *desc_count = 0;
        *len = 0;