#define rte_cio_rmb() rte_rmb()
+static __rte_always_inline void
+rte_atomic_thread_fence(int memory_order)
+{
+	__atomic_thread_fence(memory_order);
+}
+
#ifdef __cplusplus
}
#endif
#define rte_cio_rmb() rte_rmb()
+static __rte_always_inline void
+rte_atomic_thread_fence(int memory_order)
+{
+	__atomic_thread_fence(memory_order);
+}
+
/*------------------------ 128 bit atomic operations -------------------------*/
#if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS)
asm volatile ("" : : : "memory"); \
} while(0)
+/**
+ * Synchronization fence between threads based on the specified memory order.
+ */
+static inline void rte_atomic_thread_fence(int memory_order);
+
/*------------------------- 16 bit atomic operations -------------------------*/
/**
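For reference, a minimal usage sketch (not part of the patch) of how the new generic wrapper is meant to replace direct __atomic_thread_fence() calls in fence-based release/acquire synchronization. The data/ready variables and the producer/consumer functions below are hypothetical:

#include <stdint.h>
#include <rte_atomic.h>

static uint32_t data;   /* hypothetical payload written by the producer */
static uint32_t ready;  /* hypothetical flag polled by the consumer */

static void
producer(void)
{
	__atomic_store_n(&data, 42, __ATOMIC_RELAXED);
	/* order the data store before the flag store */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	__atomic_store_n(&ready, 1, __ATOMIC_RELAXED);
}

static void
consumer(void)
{
	while (__atomic_load_n(&ready, __ATOMIC_RELAXED) == 0)
		;
	/* order the flag load before the data load */
	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
	(void)__atomic_load_n(&data, __ATOMIC_RELAXED);
}

On the weakly ordered architectures above (Arm, POWER) both fences map directly to __atomic_thread_fence(); the wrapper only changes code generation where an architecture provides a cheaper specialization.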
#define rte_cio_rmb() rte_rmb()
+static __rte_always_inline void
+rte_atomic_thread_fence(int memory_order)
+{
+	__atomic_thread_fence(memory_order);
+}
+
/*------------------------- 16 bit atomic operations -------------------------*/
/* To be compatible with Power7, use GCC built-in functions for 16 bit
* operations */
#define rte_cio_rmb() rte_compiler_barrier()
+/**
+ * Synchronization fence between threads based on the specified memory order.
+ *
+ * On x86, __atomic_thread_fence(__ATOMIC_SEQ_CST) generates a full 'mfence'
+ * instruction, which is quite expensive. The optimized implementation of
+ * rte_smp_mb() is used instead.
+ */
+static __rte_always_inline void
+rte_atomic_thread_fence(int memory_order)
+{
+	if (memory_order == __ATOMIC_SEQ_CST)
+		rte_smp_mb();
+	else
+		__atomic_thread_fence(memory_order);
+}
+
/*------------------------- 16 bit atomic operations -------------------------*/
#ifndef RTE_FORCE_INTRINSICS
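To illustrate why the x86 specialization matters, here is a hedged sketch of one side of a two-flag, store-load (Dekker-style) handshake, the pattern that genuinely needs a sequentially consistent fence. With this patch the fence below compiles to the cheaper rte_smp_mb() on x86 instead of a full 'mfence', while other architectures still fall through to __atomic_thread_fence(). The flag variables and the function are illustrative only; the symmetric peer thread runs the same logic with the two flags swapped:

#include <stdint.h>
#include <rte_atomic.h>

static uint32_t flag_self;  /* hypothetical: this thread's intent flag */
static uint32_t flag_peer;  /* hypothetical: the other thread's intent flag */

static int
try_enter(void)
{
	/* announce our intent */
	__atomic_store_n(&flag_self, 1, __ATOMIC_RELAXED);
	/* store-load ordering requires a sequentially consistent fence */
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
	/* only proceed if the peer has not announced its own intent */
	return __atomic_load_n(&flag_peer, __ATOMIC_RELAXED) == 0;
}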