used_idx -= vq->size;
}
- rte_smp_wmb();
+ /* The ordering for storing desc flags needs to be enforced. */
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
for (i = 0; i < vq->shadow_used_idx; i++) {
uint16_t flags;
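The hunk above (in vhost_flush_enqueue_shadow_packed) is the producer half of the packed-ring handshake: descriptor payload first, one release fence, then the flags stores. A minimal standalone sketch of that pairing in plain C11, with a hypothetical `struct desc` rather than the DPDK types, shows why a single fence before the flags loop is enough:

```c
/* Standalone sketch, not DPDK code: payload stores, release fence,
 * flags store on the producer; flags load, acquire fence, payload
 * loads on the consumer. Build: cc -std=c11 -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct desc {
	uint64_t addr;          /* payload, written before the fence */
	uint32_t len;
	_Atomic uint16_t flags; /* publication point */
};

static struct desc ring[4];

static void *producer(void *arg)
{
	for (int i = 0; i < 4; i++) {
		ring[i].addr = 0x1000u + (unsigned)i;
		ring[i].len = 64;
	}
	/* Orders every payload store above before any flags store below,
	 * mirroring rte_atomic_thread_fence(__ATOMIC_RELEASE). */
	atomic_thread_fence(memory_order_release);
	for (int i = 0; i < 4; i++)
		atomic_store_explicit(&ring[i].flags, 1, memory_order_relaxed);
	return arg;
}

static void *consumer(void *arg)
{
	for (int i = 0; i < 4; i++) {
		while (!atomic_load_explicit(&ring[i].flags,
					     memory_order_relaxed))
			;
		/* Pairs with the producer's release fence. */
		atomic_thread_fence(memory_order_acquire);
		printf("desc %d: len=%u\n", i, (unsigned)ring[i].len);
	}
	return arg;
}

int main(void)
{
	pthread_t p, c;
	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}
```

The matching acquire side is what the last hunk of this patch supplies on the dequeue path.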
@@ ... @@ vhost_flush_dequeue_shadow_packed()
struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
- rte_smp_wmb();
- vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
+ /* desc flags is the synchronization point for virtio packed vring */
+ __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
+ used_elem->flags, __ATOMIC_RELEASE);
vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
sizeof(struct vring_packed_desc),
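This hunk publishes a single descriptor, so the patch attaches the release ordering to the flags store itself rather than paying for a standalone fence. A sketch of the two forms side by side, using a hypothetical `struct pdesc` and the same GCC `__atomic` builtins as the hunk:

```c
#include <stdint.h>

struct pdesc {
	uint16_t id;
	uint16_t flags;
};

/* Fence form: pays off when many plain flags stores follow (batch paths). */
static void publish_fence(struct pdesc *d, uint16_t id, uint16_t flags)
{
	d->id = id;
	__atomic_thread_fence(__ATOMIC_RELEASE);
	d->flags = flags;
}

/* Store-release form: for a single publication store, attaching release
 * ordering to the store itself is sufficient, which is what the
 * __atomic_store_n() in the hunk above does. */
static void publish_store(struct pdesc *d, uint16_t id, uint16_t flags)
{
	d->id = id;
	__atomic_store_n(&d->flags, flags, __ATOMIC_RELEASE);
}
```

On aarch64 the store-release form can compile to a single `stlr`, while fence-plus-store costs a full `dmb ish` followed by a plain store.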
@@ ... @@ vhost_flush_enqueue_batch_packed()
vq->desc_packed[vq->last_used_idx + i].len = lens[i];
}
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
vq->desc_packed[vq->last_used_idx + i].flags = flags;
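The batch paths keep the fence form because one fence amortizes over PACKED_BATCH_SIZE descriptors. What the new fence costs relative to the old rte_smp_wmb() can be checked directly with a two-line probe (inspect the output of `cc -O2 -S`):

```c
/* Codegen probe. On x86-64 the release fence emits no instruction at all
 * (compiler barrier only); on aarch64 it emits "dmb ish", marginally
 * stronger than the "dmb ishst" store-store barrier behind the old
 * rte_smp_wmb(). Verify against your toolchain's mapping. */
#include <stdatomic.h>

void release_fence_probe(void)
{
	atomic_thread_fence(memory_order_release);
}
```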
@@ ... @@ vhost_shadow_dequeue_batch_packed()
vq->desc_packed[vq->last_used_idx + i].len = 0;
}
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
vq->desc_packed[vq->last_used_idx + i].flags = flags;
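Same pattern with a `begin` bound: slot 0 may already have been held back in the shadow ring, so only the tail of the batch is published here. A plain-C stand-in (the real `vhost_for_each_try_unroll` is a pragma-unrolled loop; the types and BATCH constant below are hypothetical):

```c
#include <stdatomic.h>
#include <stdint.h>

#define BATCH 4 /* stands in for PACKED_BATCH_SIZE */

struct pdesc {
	uint16_t id;
	uint32_t len;
	uint16_t flags;
};

/* Publish only [begin, BATCH); the single release fence still orders
 * every id/len store issued before it ahead of every flags store. */
static void publish_tail(struct pdesc *d, uint16_t begin,
			 const uint16_t *ids, uint16_t flags)
{
	for (uint16_t i = begin; i < BATCH; i++) {
		d[i].id = ids[i];
		d[i].len = 0;
	}
	atomic_thread_fence(memory_order_release);
	for (uint16_t i = begin; i < BATCH; i++)
		d[i].flags = flags;
}
```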
@@ ... @@ rte_vhost_poll_enqueue_completed()
queue_id, 0, count - vq->async_last_pkts_n);
n_pkts_cpl += vq->async_last_pkts_n;
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
while (likely((n_pkts_put < count) && n_inflight)) {
uint16_t info_idx = (start_idx + n_pkts_put) & (vq_size - 1);
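In the async completion path the fence plays the same publication role. Without reproducing the vhost async bookkeeping, the generic shape it protects looks like this (standalone sketch; the ring, index, and names are hypothetical):

```c
#include <stdatomic.h>
#include <stdint.h>

#define RING_SZ 8 /* hypothetical; the real code masks with (vq_size - 1) */

struct completion {
	uint32_t nr_done;
};

static struct completion info[RING_SZ];
static _Atomic uint16_t completed_idx;

/* Record a completion, then make it visible: the release fence keeps the
 * info[] write ahead of the index update that a polling thread observes. */
static void post_completion(uint16_t slot, uint32_t nr_done)
{
	info[slot & (RING_SZ - 1)].nr_done = nr_done;
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&completed_idx, slot + 1,
			      memory_order_relaxed);
}
```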
@@ ... @@ vhost_reserve_avail_batch_packed()
return -1;
}
- rte_smp_rmb();
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
lens[i] = descs[avail_idx + i].len;
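This last hunk is the consumer mirror of the release fences above: the avail flags are checked first, then the acquire fence orders the subsequent len loads after those checks. A standalone sketch with hypothetical types (AVAIL_BIT is bit 7, as in the packed-ring descriptor format; the real code also checks the used bit against the wrap counter):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define AVAIL_BIT 0x80 /* 1 << VRING_PACKED_DESC_F_AVAIL */
#define BATCH 4        /* stands in for PACKED_BATCH_SIZE */

struct pdesc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* Check every flags word first, then one acquire fence orders the len
 * loads after those checks, so a length is never read before the driver
 * finished writing it. */
static int read_batch_lens(const struct pdesc *d, uint32_t *lens,
			   bool avail_wrap)
{
	for (int i = 0; i < BATCH; i++)
		if (!!(d[i].flags & AVAIL_BIT) != avail_wrap)
			return -1;

	atomic_thread_fence(memory_order_acquire);

	for (int i = 0; i < BATCH; i++)
		lens[i] = d[i].len;
	return 0;
}
```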