X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Fx86%2Finclude%2Frte_atomic.h;h=915afd9d2753028f58cc9e68717a164d38c2f066;hb=30d604bb15042208896cb3a924b49247adb9f2ed;hp=b9dcd30aba9ab79bbe70b5088b616284befcd5b1;hpb=a9aa14d9aa755472ab4e2aae62524feed7e8731f;p=dpdk.git

diff --git a/lib/librte_eal/x86/include/rte_atomic.h b/lib/librte_eal/x86/include/rte_atomic.h
index b9dcd30aba..915afd9d27 100644
--- a/lib/librte_eal/x86/include/rte_atomic.h
+++ b/lib/librte_eal/x86/include/rte_atomic.h
@@ -79,9 +79,21 @@ rte_smp_mb(void)
 
 #define rte_io_rmb() rte_compiler_barrier()
 
-#define rte_cio_wmb() rte_compiler_barrier()
-
-#define rte_cio_rmb() rte_compiler_barrier()
+/**
+ * Synchronization fence between threads based on the specified memory order.
+ *
+ * On x86 the __atomic_thread_fence(__ATOMIC_SEQ_CST) generates full 'mfence'
+ * which is quite expensive. The optimized implementation of rte_smp_mb is
+ * used instead.
+ */
+static __rte_always_inline void
+rte_atomic_thread_fence(int memorder)
+{
+	if (memorder == __ATOMIC_SEQ_CST)
+		rte_smp_mb();
+	else
+		__atomic_thread_fence(memorder);
+}
 
 /*------------------------- 16 bit atomic operations -------------------------*/
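
For illustration only, not part of the patch: a minimal usage sketch of the new wrapper, with a hypothetical producer/consumer pair. It shows the point of the optimization: rte_atomic_thread_fence(__ATOMIC_SEQ_CST) resolves to the cheaper rte_smp_mb() on x86, while weaker orders fall through to __atomic_thread_fence().

#include <rte_atomic.h>

static volatile int flag;
static volatile int data;

/* Hypothetical producer: publish data, then set the flag. */
void
producer(void)
{
	data = 42;
	/* Full fence; on x86 this expands to rte_smp_mb() (a locked add)
	 * rather than the more expensive 'mfence'. */
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
	flag = 1;
}

/* Hypothetical consumer: wait for the flag, then read data. */
int
consumer(void)
{
	while (flag == 0)
		;
	/* Non-SEQ_CST orders still go through __atomic_thread_fence(). */
	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
	return data;
}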