vhost: replace SMP with thread fence for control path
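
In the packed virtqueue layout, the descriptor flags field is the
synchronization point between the vhost backend and the driver: the backend
must make the descriptor body (id, len) visible before the flags update, and
the driver must read flags before touching the body. The hunks below
therefore replace rte_smp_wmb()/rte_smp_rmb() with one-way C11 barriers: a
release fence (or a release store of flags) on the write side and an acquire
fence on the read side.

The following is a minimal, self-contained sketch of that release/acquire
pairing using the same GCC __atomic builtins. It is illustrative only; the
struct and function names (my_desc, publish_desc, poll_desc, MY_DESC_F_USED)
are hypothetical and not part of the vhost code.

    #include <stdint.h>
    #include <stdbool.h>

    struct my_desc {
            uint32_t len;
            uint16_t id;
            uint16_t flags;   /* synchronization point, as in the packed ring */
    };

    #define MY_DESC_F_USED  (1u << 15)

    /* Writer (backend): fill the descriptor body first, then publish it by
     * storing flags with release semantics, so a reader that observes the
     * flags update is guaranteed to also observe the body. */
    static inline void
    publish_desc(struct my_desc *d, uint16_t id, uint32_t len, uint16_t flags)
    {
            d->id  = id;
            d->len = len;
            __atomic_store_n(&d->flags, flags | MY_DESC_F_USED, __ATOMIC_RELEASE);
    }

    /* Reader (driver): load flags with acquire semantics first; only once the
     * descriptor is marked used is it safe to read id and len. */
    static inline bool
    poll_desc(const struct my_desc *d, uint16_t *id, uint32_t *len)
    {
            uint16_t flags = __atomic_load_n(&d->flags, __ATOMIC_ACQUIRE);

            if (!(flags & MY_DESC_F_USED))
                    return false;
            *id  = d->id;
            *len = d->len;
            return true;
    }

When a batch of descriptors is filled in a loop (as in the
vhost_flush_enqueue_batch_packed hunk), a single
rte_atomic_thread_fence(__ATOMIC_RELEASE) placed between the body stores and
the flags stores gives the same ordering as issuing a release store per flag.
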
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index c912ae3..fec08b2 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -171,7 +171,8 @@ vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
                        used_idx -= vq->size;
        }
 
-       rte_smp_wmb();
+       /* The ordering for storing desc flags needs to be enforced. */
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);
 
        for (i = 0; i < vq->shadow_used_idx; i++) {
                uint16_t flags;
@@ -222,8 +223,9 @@ vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
        struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
 
        vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
-       rte_smp_wmb();
-       vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
+       /* desc flags is the synchronization point for virtio packed vring */
+       __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
+                        used_elem->flags, __ATOMIC_RELEASE);
 
        vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
                                   sizeof(struct vring_packed_desc),
@@ -253,7 +255,7 @@ vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
                vq->desc_packed[vq->last_used_idx + i].len = lens[i];
        }
 
-       rte_smp_wmb();
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                vq->desc_packed[vq->last_used_idx + i].flags = flags;
@@ -312,7 +314,7 @@ vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
                vq->desc_packed[vq->last_used_idx + i].len = 0;
        }
 
-       rte_smp_wmb();
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);
        vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
                vq->desc_packed[vq->last_used_idx + i].flags = flags;
 
@@ -1661,7 +1663,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
                        queue_id, 0, count - vq->async_last_pkts_n);
        n_pkts_cpl += vq->async_last_pkts_n;
 
-       rte_smp_wmb();
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);
 
        while (likely((n_pkts_put < count) && n_inflight)) {
                uint16_t info_idx = (start_idx + n_pkts_put) & (vq_size - 1);
@@ -2245,7 +2247,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
                        return -1;
        }
 
-       rte_smp_rmb();
+       rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                lens[i] = descs[avail_idx + i].len;