eal: add wrapper for C11 atomic thread fence
author     Phil Yang <phil.yang@arm.com>
           Fri, 17 Jul 2020 10:14:36 +0000 (18:14 +0800)
committer  David Marchand <david.marchand@redhat.com>
           Fri, 17 Jul 2020 14:00:30 +0000 (16:00 +0200)
Provide a wrapper for the __atomic_thread_fence builtin so that an
optimized implementation can be used for the __ATOMIC_SEQ_CST memory
order on x86 platforms.

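A minimal usage sketch of the new wrapper (hypothetical caller, not part
of this patch; shared_data, ready_flag and use() are placeholders): a
writer publishes data with a release fence and a reader pairs it with an
acquire fence, both passing the same memory-order constants the builtin
takes.

    /* writer: plain store, release fence, then relaxed flag store */
    shared_data = value;
    rte_atomic_thread_fence(__ATOMIC_RELEASE);
    __atomic_store_n(&ready_flag, 1, __ATOMIC_RELAXED);

    /* reader: wait for the flag, acquire fence, then read the data */
    while (__atomic_load_n(&ready_flag, __ATOMIC_RELAXED) == 0)
        rte_pause();
    rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
    use(shared_data);
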
Suggested-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Signed-off-by: Phil Yang <phil.yang@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
lib/librte_eal/arm/include/rte_atomic_32.h
lib/librte_eal/arm/include/rte_atomic_64.h
lib/librte_eal/include/generic/rte_atomic.h
lib/librte_eal/ppc/include/rte_atomic.h
lib/librte_eal/x86/include/rte_atomic.h

diff --git a/lib/librte_eal/arm/include/rte_atomic_32.h b/lib/librte_eal/arm/include/rte_atomic_32.h
index 7dc0d06..368f10c 100644
@@ -37,6 +37,12 @@ extern "C" {
 
 #define rte_cio_rmb() rte_rmb()
 
+static __rte_always_inline void
+rte_atomic_thread_fence(int memory_order)
+{
+       __atomic_thread_fence(memory_order);
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_eal/arm/include/rte_atomic_64.h b/lib/librte_eal/arm/include/rte_atomic_64.h
index e42f69e..5cae52d 100644
@@ -41,6 +41,12 @@ extern "C" {
 
 #define rte_cio_rmb() rte_rmb()
 
+static __rte_always_inline void
+rte_atomic_thread_fence(int memory_order)
+{
+       __atomic_thread_fence(memory_order);
+}
+
 /*------------------------ 128 bit atomic operations -------------------------*/
 
 #if defined(__ARM_FEATURE_ATOMICS) || defined(RTE_ARM_FEATURE_ATOMICS)
diff --git a/lib/librte_eal/include/generic/rte_atomic.h b/lib/librte_eal/include/generic/rte_atomic.h
index e6ab15a..95270f1 100644
@@ -158,6 +158,11 @@ static inline void rte_cio_rmb(void);
        asm volatile ("" : : : "memory");       \
 } while(0)
 
+/**
+ * Synchronization fence between threads based on the specified memory order.
+ */
+static inline void rte_atomic_thread_fence(int memory_order);
+
 /*------------------------- 16 bit atomic operations -------------------------*/
 
 /**
diff --git a/lib/librte_eal/ppc/include/rte_atomic.h b/lib/librte_eal/ppc/include/rte_atomic.h
index 7e3e131..527fcaf 100644
@@ -40,6 +40,12 @@ extern "C" {
 
 #define rte_cio_rmb() rte_rmb()
 
+static __rte_always_inline void
+rte_atomic_thread_fence(int memory_order)
+{
+       __atomic_thread_fence(memory_order);
+}
+
 /*------------------------- 16 bit atomic operations -------------------------*/
 /* To be compatible with Power7, use GCC built-in functions for 16 bit
  * operations */
diff --git a/lib/librte_eal/x86/include/rte_atomic.h b/lib/librte_eal/x86/include/rte_atomic.h
index b9dcd30..62ea393 100644
@@ -83,6 +83,22 @@ rte_smp_mb(void)
 
 #define rte_cio_rmb() rte_compiler_barrier()
 
+/**
+ * Synchronization fence between threads based on the specified memory order.
+ *
+ * On x86, __atomic_thread_fence(__ATOMIC_SEQ_CST) generates a full 'mfence',
+ * which is quite expensive. The optimized implementation of rte_smp_mb()
+ * is used instead.
+ */
+static __rte_always_inline void
+rte_atomic_thread_fence(int memory_order)
+{
+       if (memory_order == __ATOMIC_SEQ_CST)
+               rte_smp_mb();
+       else
+               __atomic_thread_fence(memory_order);
+}
+
 /*------------------------- 16 bit atomic operations -------------------------*/
 
 #ifndef RTE_FORCE_INTRINSICS
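
Because the wrapper is __rte_always_inline and memory_order is a
compile-time constant at virtually every call site, the branch above is
resolved during constant folding and costs nothing at run time. A rough
sketch of what call sites reduce to on x86 (illustrative only; the exact
instructions depend on the compiler and on how rte_smp_mb() is
implemented):

    rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
    /* -> rte_smp_mb(): typically a locked read-modify-write, which is
     *    cheaper than the 'mfence' the builtin emits for SEQ_CST */

    rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
    /* -> __atomic_thread_fence(__ATOMIC_ACQUIRE): on x86 this is only
     *    a compiler barrier, no instruction is emitted */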