From d23a6bd04d728226c99d6995c8bd65d49d7e61d1 Mon Sep 17 00:00:00 2001
From: Chao Zhu
Date: Fri, 15 Jul 2016 10:30:19 +0800
Subject: [PATCH] eal/ppc: fix memory barrier for IBM POWER

On weak memory ordering architectures such as POWER, rte_smp_wmb/rte_smp_rmb
must emit CPU memory barrier instructions, not just a compiler barrier.
This patch fixes that. Also, to improve performance on PPC64, use the
lightweight sync (lwsync) instruction instead of the full sync instruction.

Signed-off-by: Chao Zhu
---
 .../common/include/arch/ppc_64/rte_atomic.h | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
index feae4868ed..924e89408d 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
@@ -62,7 +62,11 @@ extern "C" {
  * Guarantees that the STORE operations generated before the barrier
  * occur before the STORE operations generated after.
  */
+#ifdef RTE_ARCH_64
+#define rte_wmb() {asm volatile("lwsync" : : : "memory"); }
+#else
 #define rte_wmb() {asm volatile("sync" : : : "memory"); }
+#endif
 
 /**
  * Read memory barrier.
@@ -70,13 +74,17 @@
  * Guarantees that the LOAD operations generated before the barrier
  * occur before the LOAD operations generated after.
  */
+#ifdef RTE_ARCH_64
+#define rte_rmb() {asm volatile("lwsync" : : : "memory"); }
+#else
 #define rte_rmb() {asm volatile("sync" : : : "memory"); }
+#endif
 
 #define rte_smp_mb() rte_mb()
 
-#define rte_smp_wmb() rte_compiler_barrier()
+#define rte_smp_wmb() rte_wmb()
 
-#define rte_smp_rmb() rte_compiler_barrier()
+#define rte_smp_rmb() rte_rmb()
 
 /*------------------------- 16 bit atomic operations -------------------------*/
 /* To be compatible with Power7, use GCC built-in functions for 16 bit
-- 
2.20.1
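
For context, a minimal usage sketch of the barriers this patch changes: one
core publishes a payload and then sets a flag, another core polls the flag and
then reads the payload. The data/ready variables and the producer/consumer
functions are illustrative assumptions, not part of DPDK; only
rte_smp_wmb()/rte_smp_rmb() come from the API touched by the patch. With only
a compiler barrier, the POWER CPU itself may still reorder the stores or
loads, so the consumer could see the flag before the payload; the lwsync
emitted after this patch rules that out on PPC64.

#include <stdint.h>
#include <rte_atomic.h>

static volatile uint32_t data;   /* payload written by the producer */
static volatile uint32_t ready;  /* publication flag read by the consumer */

/* Producer core: write the payload, then publish it via the flag. */
static void
producer(uint32_t value)
{
	data = value;
	rte_smp_wmb();  /* with this patch: lwsync on PPC64, not just a compiler barrier */
	ready = 1;
}

/* Consumer core: wait for the flag, then read the payload. */
static uint32_t
consumer(void)
{
	while (ready == 0)
		;
	rte_smp_rmb();  /* keep the load of data from being reordered before the flag check */
	return data;
}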