diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
index fb7af2bdea..150810cdb3 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
@@ -46,6 +46,7 @@
 extern "C" {
 #endif
 
+#include <stdint.h>
 #include "generic/rte_atomic.h"
 
 /**
@@ -62,7 +63,11 @@ extern "C" {
  * Guarantees that the STORE operations generated before the barrier
  * occur before the STORE operations generated after.
  */
+#ifdef RTE_ARCH_64
+#define rte_wmb() {asm volatile("lwsync" : : : "memory"); }
+#else
 #define rte_wmb() {asm volatile("sync" : : : "memory"); }
+#endif
 
 /**
  * Read memory barrier.
@@ -70,7 +75,23 @@ extern "C" {
  * Guarantees that the LOAD operations generated before the barrier
  * occur before the LOAD operations generated after.
  */
+#ifdef RTE_ARCH_64
+#define rte_rmb() {asm volatile("lwsync" : : : "memory"); }
+#else
 #define rte_rmb() {asm volatile("sync" : : : "memory"); }
+#endif
+
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_wmb()
+
+#define rte_smp_rmb() rte_rmb()
+
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_wmb()
+
+#define rte_io_rmb() rte_rmb()
 
 /*------------------------- 16 bit atomic operations -------------------------*/
 /* To be compatible with Power7, use GCC built-in functions for 16 bit
@@ -103,12 +124,12 @@ rte_atomic16_dec(rte_atomic16_t *v)
 
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 {
-	return (__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0);
+	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
 }
 
 static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 {
-	return (__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0);
+	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
 }
 
 /*------------------------- 32 bit atomic operations -------------------------*/
@@ -192,7 +213,7 @@ static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 		: [cnt] "r" (&v->cnt)
 		: "cc", "xer", "memory");
 
-	return (ret == 0);
+	return ret == 0;
 }
 
 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
@@ -210,7 +231,7 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 		: [cnt] "r" (&v->cnt)
 		: "cc", "xer", "memory");
 
-	return (ret == 0);
+	return ret == 0;
 }
 
 /*------------------------- 64 bit atomic operations -------------------------*/
@@ -381,7 +402,7 @@ static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
 		: [cnt] "r" (&v->cnt)
 		: "cc", "xer", "memory");
 
-	return (ret == 0);
+	return ret == 0;
 }
 
 static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
@@ -399,7 +420,7 @@ static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
 		: [cnt] "r" (&v->cnt)
 		: "cc", "xer", "memory");
 
-	return (ret == 0);
+	return ret == 0;
 }
 
 static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
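
A note on the barrier hunks: on 64-bit Power, lwsync orders store-store and load-load pairs against cacheable memory, which is exactly what rte_wmb()/rte_rmb() promise, so the patch substitutes it for the heavier full sync there; rte_mb() keeps sync, and the new rte_io_* names simply alias the corresponding general barriers on this architecture. The sketch below is illustrative and not part of the patch: it shows the message-passing pattern the new rte_smp_* aliases target, with fallback fence definitions (an assumption, only so the sketch builds without DPDK; in DPDK these macros come from this header).

/* Producer/consumer message passing with the smp barriers.
 * The fallback defines are assumptions for a standalone build. */
#include <stdint.h>

#ifndef rte_smp_wmb
#define rte_smp_wmb() __atomic_thread_fence(__ATOMIC_RELEASE)
#define rte_smp_rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)
#endif

static volatile uint32_t payload;
static volatile uint32_t ready;

static void producer(void)
{
	payload = 42;      /* store the data                     */
	rte_smp_wmb();     /* data store ordered before the flag */
	ready = 1;         /* publish                            */
}

static uint32_t consumer(void)
{
	while (ready == 0) /* spin until the flag is visible     */
		;
	rte_smp_rmb();     /* flag load ordered before data load */
	return payload;    /* guaranteed to observe 42           */
}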
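The 16-bit helpers stay on GCC's __atomic built-ins (the file keeps them "to be compatible with Power7"); the patch only drops the redundant parentheses around the return expressions. A standalone check of the inc-and-test contract, using the same built-in and memory order as the header, with a hypothetical stand-in type since no DPDK headers are pulled in:

/* Compile with: gcc -O2 check_atomic16.c -o check_atomic16 */
#include <assert.h>
#include <stdint.h>

typedef struct { volatile int16_t cnt; } atomic16_t; /* stand-in for rte_atomic16_t */

static inline int
atomic16_inc_and_test(atomic16_t *v)
{
	/* Same built-in and memory order as the patched header. */
	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

int main(void)
{
	atomic16_t v = { .cnt = -1 };

	assert(atomic16_inc_and_test(&v));  /* -1 -> 0: true  */
	assert(!atomic16_inc_and_test(&v)); /*  0 -> 1: false */
	return 0;
}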
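The 32- and 64-bit inc/dec-and-test bodies are mostly elided by these hunks; only the asm operand and clobber lists ("cc", "xer", "memory") and the cleaned-up returns are visible. In the full header they are load-reserve/store-conditional (lwarx/stwcx., ldarx/stdcx.) retry loops. Dropping the parentheses in `return (ret == 0);` is a pure style cleanup, since return is not a function. As a rough C-level equivalent of what such a loop computes, hedged because it ignores the exact fence placement of the hand-written asm:

#include <stdint.h>

typedef struct { volatile int32_t cnt; } atomic32_t; /* stand-in for rte_atomic32_t */

/* Atomically add 1 and report whether the new value is zero, the same
 * contract as rte_atomic32_inc_and_test().  On PowerPC, GCC lowers this
 * built-in to an lwarx/stwcx. retry loop much like the header's asm. */
static inline int
atomic32_inc_and_test(atomic32_t *v)
{
	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}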