vq->vq_desc_head_idx = dxp->next;
if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
- virtio_wmb(hw->weak_barriers);
- start_dp[idx].flags = flags;
+
+ virtqueue_store_flags_packed(&start_dp[idx], flags,
+ hw->weak_barriers);
+
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->vq_packed.cached_flags ^=
	VRING_PACKED_DESC_F_AVAIL_USED;
vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
}
- virtio_wmb(vq->hw->weak_barriers);
- dp->flags = flags;
+ virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
}
static inline void
vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
}
- virtio_wmb(vq->hw->weak_barriers);
- head_dp->flags = head_flags;
+ virtqueue_store_flags_packed(head_dp, head_flags,
+ vq->hw->weak_barriers);
}
static inline void
static inline int
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- uint16_t flags = desc->flags;
+ uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
return wrap_counter == !!(flags & VRING_PACKED_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_PACKED_DESC_F_USED);
}
struct vring_packed *vring = &dev->packed_vrings[queue_idx];
uint16_t n_descs, flags;
+ /* Perform a load-acquire barrier in desc_is_avail to
+ * enforce the ordering between desc flags and desc
+ * content.
+ */
while (desc_is_avail(&vring->desc[vq->used_idx],
vq->used_wrap_counter)) {
rte_cio_wmb();
}
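
To make the comment above concrete, here is a minimal standalone sketch of the consumer-side pattern (the demo_* names and flag values are illustrative, not part of the patch): a single load-acquire on the flags field orders every later read of the descriptor body, so no separate read barrier is needed between the availability check and the use of addr/len/id.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative packed-descriptor layout and flag bits; the real ones come
 * from the virtio headers.
 */
struct demo_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

#define DEMO_F_AVAIL	(1u << 7)
#define DEMO_F_USED	(1u << 15)

static bool
demo_desc_is_avail(struct demo_desc *desc, bool wrap_counter)
{
	/* Load-acquire: reads issued after this load (addr, len, id) cannot
	 * be reordered before it, which previously required a separate read
	 * barrier after a plain load of flags.
	 */
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & DEMO_F_AVAIL) &&
	       wrap_counter != !!(flags & DEMO_F_USED);
}

static int
demo_consume(struct demo_desc *desc, bool wrap_counter, uint64_t *addr)
{
	if (!demo_desc_is_avail(desc, wrap_counter))
		return -1;

	/* Safe without a further barrier: the acquire above makes the
	 * producer's writes to the descriptor body visible here.
	 */
	*addr = desc->addr;
	return 0;
}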
+static inline void
+virtqueue_store_flags_packed(struct vring_packed_desc *dp,
+ uint16_t flags, uint8_t weak_barriers)
+{
+ if (weak_barriers) {
+/* On x86, rte_smp_wmb is preferred over __atomic_store_n as it reports
+ * better performance (~1.5%), which comes from the branch the compiler
+ * saves: the if and else branches are identical, with the smp and cio
+ * barriers both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+ rte_smp_wmb();
+ dp->flags = flags;
+#else
+ __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+#endif
+ } else {
+ rte_cio_wmb();
+ dp->flags = flags;
+ }
+}
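
For context, a hedged usage sketch of the helper above (demo_publish_desc is invented for illustration and assumes the virtqueue.h definitions are in scope): the descriptor body is written first, and the flags store then publishes it with the release semantics that the explicit virtio_wmb() used to provide, pairing with the load-acquire in desc_is_avail on the reader side.

/* Illustrative only: fill one packed descriptor and publish it. */
static inline void
demo_publish_desc(struct vring_packed_desc *dp, uint64_t addr, uint32_t len,
		  uint16_t id, uint16_t flags, uint8_t weak_barriers)
{
	dp->addr = addr;	/* descriptor body first ...             */
	dp->len  = len;
	dp->id   = id;

	/* ... then the flags: the release (or write barrier) keeps the
	 * body stores ahead of the flags store.
	 */
	virtqueue_store_flags_packed(dp, flags, weak_barriers);
}

On x86, rte_smp_wmb() and rte_cio_wmb() both expand to compiler barriers, so the weak and strong paths of the helper generate the same instructions; keeping the rte_smp_wmb() path under RTE_ARCH_X86_64 simply lets the compiler drop the weak_barriers branch, which the comment above reports as roughly a 1.5% gain.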
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- uint16_t flags = *((volatile uint16_t *) &desc->flags);
+ uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}
if (avail_idx < vq->last_avail_idx)
wrap_counter ^= 1;
- if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
- return -1;
-
/*
- * The ordering between desc flags and desc
- * content reads need to be enforced.
+ * Perform a load-acquire barrier in desc_is_avail to
+ * enforce the ordering between desc flags and desc
+ * content.
*/
- rte_smp_rmb();
+ if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
+ return -1;
*desc_count = 0;
*len = 0;
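
As a self-contained illustration of why the standalone rte_smp_rmb() can go away (a sketch assuming one writer and one reader thread; the demo types and the pthread harness are invented for the example, not taken from vhost): the release store of the flags synchronizes-with the acquire load in the availability check, so a reader that observes the new flags is guaranteed to also observe the descriptor contents written before them.

#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_F_AVAIL	(1u << 7)

struct demo_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
};

static struct demo_desc g_desc;

static void *
writer(void *arg)
{
	(void)arg;
	g_desc.addr = 0x1000;			/* descriptor body ...      */
	g_desc.len = 64;
	__atomic_store_n(&g_desc.flags,		/* ... then release flags   */
			 DEMO_F_AVAIL, __ATOMIC_RELEASE);
	return NULL;
}

static void *
reader(void *arg)
{
	(void)arg;
	/* Spin until the acquire load observes the AVAIL bit; no extra
	 * read barrier is needed before reading addr/len afterwards.
	 */
	while (!(__atomic_load_n(&g_desc.flags, __ATOMIC_ACQUIRE) &
		 DEMO_F_AVAIL))
		;
	printf("addr=0x%" PRIx64 " len=%" PRIu32 "\n",
	       g_desc.addr, g_desc.len);
	return NULL;
}

int
main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Built with -pthread, the reader always prints the addr/len written by the writer, regardless of the CPU memory model, which is the property the replaced fence-plus-plain-load pattern was enforcing by hand.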