for (i = 0; i < num; i++) {
used_idx = vq->vq_used_cons_idx;
+ /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ * and waits for the used desc in the virtqueue.
+ */
if (!desc_is_used(&desc[used_idx], vq))
return i;
- virtio_rmb(vq->hw->weak_barriers);
len[i] = desc[used_idx].len;
id = desc[used_idx].id;
cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
struct vq_desc_extra *dxp;
used_idx = vq->vq_used_cons_idx;
+ /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ * and waits for the used desc in the virtqueue.
+ */
while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
- virtio_rmb(vq->hw->weak_barriers);
id = desc[used_idx].id;
do {
curr_id = used_idx;
struct vq_desc_extra *dxp;
used_idx = vq->vq_used_cons_idx;
+ /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ * and waits for the used desc in the virtqueue.
+ */
while (num-- && desc_is_used(&desc[used_idx], vq)) {
- virtio_rmb(vq->hw->weak_barriers);
id = desc[used_idx].id;
dxp = &vq->vq_descx[id];
vq->vq_used_cons_idx += dxp->ndescs;
if (vq->used_wrap_counter)
flags |= VRING_PACKED_DESC_F_AVAIL_USED;
- rte_smp_wmb();
- vring->desc[vq->used_idx].flags = flags;
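+ /* The release store pairs with the driver's load-acquire of the flags,
+ * making the descriptor updates visible before the desc is marked used.
+ */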
+ __atomic_store_n(&vring->desc[vq->used_idx].flags, flags,
+ __ATOMIC_RELEASE);
vq->used_idx += n_descs;
if (vq->used_idx >= dev->queue_size) {
rte_cio_wmb();
}
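+/* Fetch the flags of a packed descriptor, enforcing the ordering needed so
+ * that the rest of the descriptor is only read after its flags are observed.
+ */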
+static inline uint16_t
+virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
+ uint8_t weak_barriers)
+{
+ uint16_t flags;
+
+ if (weak_barriers) {
+/* On x86, rte_smp_rmb is preferred over __atomic_load_n as it reports
+ * better performance (~1.5%), which comes from the branch saved by the
+ * compiler. The if and else branches are identical, since both the smp
+ * and cio barriers are defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+ flags = dp->flags;
+ rte_smp_rmb();
+#else
+ flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+#endif
+ } else {
+ flags = dp->flags;
+ rte_cio_rmb();
+ }
+
+ return flags;
+}
+
static inline void
virtqueue_store_flags_packed(struct vring_packed_desc *dp,
uint16_t flags, uint8_t weak_barriers)
{
uint16_t used, avail, flags;
- flags = desc->flags;
+ flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
used = !!(flags & VRING_PACKED_DESC_F_USED);
avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
used_idx -= vq->size;
}
- rte_smp_wmb();
-
for (i = 0; i < vq->shadow_used_idx; i++) {
uint16_t flags;
}
}
- vq->desc_packed[head_idx].flags = head_flags;
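+ /* Store the head flags last with release ordering so the guest sees all
+ * other used descriptor updates before the head desc is marked used.
+ */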
+ __atomic_store_n(&vq->desc_packed[head_idx].flags, head_flags,
+ __ATOMIC_RELEASE);
vhost_log_cache_used_vring(dev, vq,
head_idx *