if (weak_barriers)
rte_smp_rmb();
else
- rte_cio_rmb();
+ rte_io_rmb();
}
static inline void
if (weak_barriers)
rte_smp_wmb();
else
- rte_cio_wmb();
+ rte_io_wmb();
}
static inline uint16_t
if (weak_barriers) {
/* x86 prefers using rte_smp_rmb over __atomic_load_n as it reports
* a better perf (~1.5%), which comes from the branch saved by the compiler.
- * The if and else branch are identical with the smp and cio barriers both
+ * The if and else branches are identical with the smp and io barriers both
* defined as compiler barriers on x86.
*/
#ifdef RTE_ARCH_X86_64
#endif
} else {
flags = dp->flags;
- rte_cio_rmb();
+ rte_io_rmb();
}
return flags;
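/*
 * Illustrative sketch, not part of the driver (names prefixed with sketch_
 * are hypothetical): the reason the if and else branches above are identical
 * on x86 is that both the SMP and the I/O read barrier reduce to a plain
 * compiler barrier there, so only the load itself remains.
 */
#define sketch_compiler_barrier() do { asm volatile ("" : : : "memory"); } while (0)
#define sketch_smp_rmb() sketch_compiler_barrier() /* rte_smp_rmb() on x86 */
#define sketch_io_rmb()  sketch_compiler_barrier() /* rte_io_rmb() on x86 */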
if (weak_barriers) {
/* x86 prefers using rte_smp_wmb over __atomic_store_n as it reports
* a better perf (~1.5%), which comes from the branch saved by the compiler.
- * The if and else branch are identical with the smp and cio barriers both
+ * The if and else branches are identical with the smp and io barriers both
* defined as compiler barriers on x86.
*/
#ifdef RTE_ARCH_X86_64
__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
#endif
} else {
- rte_cio_wmb();
+ rte_io_wmb();
dp->flags = flags;
}
}
return VTNET_TQ;
}
-/* virtqueue_nused has load-acquire or rte_cio_rmb insed */
+/* virtqueue_nused has a load-acquire or rte_io_rmb inside */
static inline uint16_t
virtqueue_nused(const struct virtqueue *vq)
{
* x86 prefers using rte_smp_rmb over __atomic_load_n as it
* reports a slightly better perf, which comes from the branch
* saved by the compiler.
- * The if and else branches are identical with the smp and cio
+ * The if and else branches are identical with the smp and io
* barriers both defined as compiler barriers on x86.
*/
#ifdef RTE_ARCH_X86_64
#endif
} else {
idx = vq->vq_split.ring.used->idx;
- rte_cio_rmb();
+ rte_io_rmb();
}
return idx - vq->vq_used_cons_idx;
}
* it reports a slightly better perf, which comes from the
* branch saved by the compiler.
* The if and else branches are identical with the smp and
- * cio barriers both defined as compiler barriers on x86.
+ * io barriers both defined as compiler barriers on x86.
*/
#ifdef RTE_ARCH_X86_64
rte_smp_wmb();
vq->vq_avail_idx, __ATOMIC_RELEASE);
#endif
} else {
- rte_cio_wmb();
+ rte_io_wmb();
vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
}
}
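/*
 * Illustrative sketch with hypothetical names, not part of the driver: the
 * two equivalent ways the code above publishes a new ring index. Earlier
 * descriptor writes are ordered before the index update in either branch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <rte_atomic.h>
static inline void
sketch_publish_idx(uint16_t *ring_idx, uint16_t new_idx, bool weak_barriers)
{
	if (weak_barriers) {
		/* release store: ordering and the store in one step */
		__atomic_store_n(ring_idx, new_idx, __ATOMIC_RELEASE);
	} else {
		rte_io_wmb();        /* order descriptor writes before the index write */
		*ring_idx = new_idx; /* then expose the new index to the device */
	}
}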
/* avoid the write operation when possible, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do { \
- typeof(var) var_ = (var); \
- typeof(val) val_ = (val); \
- if ((var_) != (val_)) \
- (var_) = (val_); \
+ typeof(var) *const var_ = &(var); \
+ typeof(val) const val_ = (val); \
+ if (*var_ != val_) \
+ *var_ = val_; \
} while (0)
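/*
 * Illustrative check, not part of the patch: with the address-of form above,
 * the conditional store reaches the caller's variable (the removed form
 * assigned to a local copy), and each argument is still evaluated only once.
 */
#include <assert.h>
#include <stdint.h>
static inline void
sketch_assign_unless_equal_check(void)
{
	uint16_t csum_start = 7;
	ASSIGN_UNLESS_EQUAL(csum_start, 0); /* values differ: the store happens */
	assert(csum_start == 0);
	ASSIGN_UNLESS_EQUAL(csum_start, 0); /* already equal: the store is skipped */
	assert(csum_start == 0);
}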
#define virtqueue_clear_net_hdr(hdr) do { \
struct vq_desc_extra *dxp;
used_idx = vq->vq_used_cons_idx;
- /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ /* desc_is_used has a load-acquire or rte_io_rmb inside;
* wait here for a used desc in the virtqueue.
*/
while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
struct vq_desc_extra *dxp;
used_idx = vq->vq_used_cons_idx;
- /* desc_is_used has a load-acquire or rte_cio_rmb inside
+ /* desc_is_used has a load-acquire or rte_io_rmb inside;
* wait here for a used desc in the virtqueue.
*/
while (num-- && desc_is_used(&desc[used_idx], vq)) {
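/*
 * Illustrative sketch with hypothetical names: the kind of acquire-ordered
 * test the comment above refers to. The descriptor flags are loaded with
 * acquire semantics, so once the used check succeeds the rest of the
 * descriptor can be read without a further barrier.
 */
#include <stdbool.h>
#include <stdint.h>
static inline bool
sketch_desc_is_used(const uint16_t *desc_flags, bool used_wrap_counter)
{
	uint16_t flags = __atomic_load_n(desc_flags, __ATOMIC_ACQUIRE);
	bool avail = !!(flags & (1u << 7));  /* avail bit of a packed-ring descriptor */
	bool used  = !!(flags & (1u << 15)); /* used bit */
	return avail == used && used == used_wrap_counter;
}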