X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Fcommon%2Finclude%2Frte_atomic.h;h=a5b6eec30276c0fee9764b90a099c8c74cf72ed5;hb=a4ff75496a18f78bed3a678131bfdd11f1e8d3f7;hp=ffd90e49b8e81147e932e557759fe72caead1f36;hpb=3b46fb77ebdb6bb6b47c578a2986077dcff68a19;p=dpdk.git

diff --git a/lib/librte_eal/common/include/rte_atomic.h b/lib/librte_eal/common/include/rte_atomic.h
index ffd90e49b8..a5b6eec302 100644
--- a/lib/librte_eal/common/include/rte_atomic.h
+++ b/lib/librte_eal/common/include/rte_atomic.h
@@ -1,35 +1,34 @@
 /*-
  *   BSD LICENSE
- *
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
+ *
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
  */
 
 #ifndef _RTE_ATOMIC_H_
@@ -64,7 +63,7 @@ extern "C" {
  * Guarantees that the LOAD and STORE operations generated before the
  * barrier occur before the LOAD and STORE operations generated after.
  */
-#define rte_mb() asm volatile("mfence;" : : : "memory")
+#define rte_mb() _mm_mfence()
 
 /**
  * Write memory barrier.
@@ -72,7 +71,7 @@ extern "C" {
  * Guarantees that the STORE operations generated before the barrier
  * occur before the STORE operations generated after.
  */
-#define rte_wmb() asm volatile("sfence;" : : : "memory")
+#define rte_wmb() _mm_sfence()
 
 /**
  * Read memory barrier.
@@ -80,7 +79,19 @@ extern "C" {
  * Guarantees that the LOAD operations generated before the barrier
  * occur before the LOAD operations generated after.
  */
-#define rte_rmb() asm volatile("lfence;" : : : "memory")
+#define rte_rmb() _mm_lfence()
+
+/**
+ * Compiler barrier.
+ *
+ * Guarantees that operation reordering does not occur at compile time
+ * for operations directly before and after the barrier.
+ */
+#define rte_compiler_barrier() do {		\
+	asm volatile ("" : : : "memory");	\
+} while(0)
+
+#include <emmintrin.h>
 
 /**
  * @file
@@ -108,6 +119,7 @@ extern "C" {
 static inline int
 rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
 {
+#ifndef RTE_FORCE_INTRINSICS
 	uint8_t res;
 
 	asm volatile(
@@ -121,6 +133,9 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
 			"m" (*dst)
 			: "memory");            /* no-clobber list */
 	return res;
+#else
+	return __sync_bool_compare_and_swap(dst, exp, src);
+#endif
 }
 
 /**
@@ -212,12 +227,16 @@ rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
 static inline void
 rte_atomic16_inc(rte_atomic16_t *v)
 {
+#ifndef RTE_FORCE_INTRINSICS
 	asm volatile(
 			MPLOCKED
 			"incw %[cnt]"
 			: [cnt] "=m" (v->cnt)   /* output */
 			: "m" (v->cnt)          /* input */
 			);
+#else
+	rte_atomic16_add(v, 1);
+#endif
 }
 
 /**
@@ -229,12 +248,16 @@ rte_atomic16_inc(rte_atomic16_t *v)
 static inline void
 rte_atomic16_dec(rte_atomic16_t *v)
 {
+#ifndef RTE_FORCE_INTRINSICS
 	asm volatile(
 			MPLOCKED
 			"decw %[cnt]"
 			: [cnt] "=m" (v->cnt)   /* output */
 			: "m" (v->cnt)          /* input */
 			);
+#else
+	rte_atomic16_sub(v, 1);
+#endif
 }
 
 /**
@@ -289,6 +312,7 @@ rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
  */
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 {
+#ifndef RTE_FORCE_INTRINSICS
 	uint8_t ret;
 
 	asm volatile(
@@ -299,6 +323,9 @@ static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 			[ret] "=qm" (ret)
 			);
 	return (ret != 0);
+#else
+	return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+#endif
 }
 
 /**
@@ -314,6 +341,7 @@ static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
  */
 static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 {
+#ifndef RTE_FORCE_INTRINSICS
 	uint8_t ret;
 
 	asm volatile(MPLOCKED
@@ -323,6 +351,9 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 			[ret] "=qm" (ret)
 			);
 	return (ret != 0);
+#else
+	return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+#endif
 }
 
 /**
@@ -373,6 +404,7 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)
 static inline int
 rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
 {
+#ifndef RTE_FORCE_INTRINSICS
 	uint8_t res;
 
 	asm volatile(
@@ -386,6 +418,9 @@ rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
 			"m" (*dst)
 			: "memory");            /* no-clobber list */
 	return res;
+#else
+	return __sync_bool_compare_and_swap(dst, exp, src);
+#endif
 }
 
 /**
@@ -477,12 +512,16 @@ rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
 static inline void
 rte_atomic32_inc(rte_atomic32_t *v)
 {
+#ifndef RTE_FORCE_INTRINSICS
 	asm volatile(
 			MPLOCKED
 			"incl %[cnt]"
 			: [cnt] "=m" (v->cnt)   /* output */
 			: "m" (v->cnt)          /* input */
 			);
+#else
+	rte_atomic32_add(v, 1);
+#endif
 }
 
 /**
@@ -494,12 +533,16 @@ rte_atomic32_inc(rte_atomic32_t *v)
 static inline void
 rte_atomic32_dec(rte_atomic32_t *v)
 {
+#ifndef RTE_FORCE_INTRINSICS
 	asm volatile(
 			MPLOCKED
 			"decl %[cnt]"
 			: [cnt] "=m" (v->cnt)   /* output */
 			: "m" (v->cnt)          /* input */
 			);
+#else
+	rte_atomic32_sub(v,1);
+#endif
 }
 
 /**
@@ -554,6 +597,7 @@ rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
  */
 static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 {
+#ifndef RTE_FORCE_INTRINSICS
 	uint8_t ret;
 
 	asm volatile(
@@ -564,6 +608,9 @@ static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 			[ret] "=qm" (ret)
 			);
 	return (ret != 0);
+#else
+	return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+#endif
 }
 
 /**
@@ -579,6 +626,7 @@ static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
  */
 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 {
+#ifndef RTE_FORCE_INTRINSICS
 	uint8_t ret;
 
 	asm volatile(MPLOCKED
@@ -588,6 +636,9 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 			[ret] "=qm" (ret)
 			);
 	return (ret != 0);
+#else
+	return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+#endif
 }
 
 /**
@@ -617,6 +668,7 @@ static inline void rte_atomic32_clear(rte_atomic32_t *v)
 	v->cnt = 0;
 }
 
+#ifndef RTE_FORCE_INTRINSICS
 /* any other functions are in arch specific files */
 #include "arch/rte_atomic.h"
 
@@ -811,6 +863,268 @@ rte_atomic64_clear(rte_atomic64_t *v);
 
 #endif /* __DOXYGEN__ */
 
+#else /*RTE_FORCE_INTRINSICS */
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+/**
+ * An atomic compare and set function used by the mutex functions.
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 64-bit words)
+ *
+ * @param dst
+ *   The destination into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+	return __sync_bool_compare_and_swap(dst, exp, src);
+}
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+	volatile int64_t cnt;  /**< Internal counter value. */
+} rte_atomic64_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC64_INIT(val) { (val) }
+
+/**
+ * Initialize the atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+#ifdef __LP64__
+	v->cnt = 0;
+#else
+	int success = 0;
+	uint64_t tmp;
+
+	while (success == 0) {
+		tmp = v->cnt;
+		success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+					      tmp, 0);
+	}
+#endif
+}
+
+/**
+ * Atomically read a 64-bit counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   The value of the counter.
+ */
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+#ifdef __LP64__
+	return v->cnt;
+#else
+	int success = 0;
+	uint64_t tmp;
+
+	while (success == 0) {
+		tmp = v->cnt;
+		/* replace the value by itself */
+		success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+					      tmp, tmp);
+	}
+	return tmp;
+#endif
+}
+
+/**
+ * Atomically set a 64-bit counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param new_value
+ *   The new value of the counter.
+ */
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+#ifdef __LP64__
+	v->cnt = new_value;
+#else
+	int success = 0;
+	uint64_t tmp;
+
+	while (success == 0) {
+		tmp = v->cnt;
+		success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+					      tmp, new_value);
+	}
+#endif
+}
+
+/**
+ * Atomically add a 64-bit value to a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ */
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+	__sync_fetch_and_add(&v->cnt, inc);
+}
+
+/**
+ * Atomically subtract a 64-bit value from a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+	__sync_fetch_and_sub(&v->cnt, dec);
+}
+
+/**
+ * Atomically increment a 64-bit counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+	rte_atomic64_add(v, 1);
+}
+
+/**
+ * Atomically decrement a 64-bit counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+	rte_atomic64_sub(v, 1);
+}
+
+/**
+ * Add a 64-bit value to an atomic counter and return the result.
+ *
+ * Atomically adds the 64-bit value (inc) to the atomic counter (v) and
+ * returns the value of v after the addition.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ * @return
+ *   The value of v after the addition.
+ */
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+	return __sync_add_and_fetch(&v->cnt, inc);
+}
+
+/**
+ * Subtract a 64-bit value from an atomic counter and return the result.
+ *
+ * Atomically subtracts the 64-bit value (dec) from the atomic counter (v)
+ * and returns the value of v after the subtraction.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ * @return
+ *   The value of v after the subtraction.
+ */
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+	return __sync_sub_and_fetch(&v->cnt, dec);
+}
+
+/**
+ * Atomically increment a 64-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns
+ * true if the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the addition is 0; false otherwise.
+ */
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+	return rte_atomic64_add_return(v, 1) == 0;
+}
+
+/**
+ * Atomically decrement a 64-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after subtraction is 0; false otherwise.
+ */
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+	return rte_atomic64_sub_return(v, 1) == 0;
+}
+
+/**
+ * Atomically test and set a 64-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   0 if failed; else 1, success.
+ */
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+
+/**
+ * Atomically set a 64-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+	rte_atomic64_set(v, 0);
+}
+
+#endif /*RTE_FORCE_INTRINSICS */
 
 #ifdef __cplusplus
 }
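
Editor's note: the patch only changes the header, so nothing above shows the new 64-bit counter API in use. The fragment below is a minimal usage sketch, not part of the patch, assuming a DPDK build with rte_atomic.h on the include path and RTE_FORCE_INTRINSICS defined; the packet-counter scenario and all names are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <rte_atomic.h>

/* Shared statistics counter, safe to update from any lcore. */
static rte_atomic64_t pkt_count = RTE_ATOMIC64_INIT(0);

static void
count_burst(unsigned int nb_pkts)
{
	/* With RTE_FORCE_INTRINSICS this compiles to __sync_fetch_and_add(). */
	rte_atomic64_add(&pkt_count, nb_pkts);
}

static int64_t
report_and_reset(void)
{
	int64_t snapshot = rte_atomic64_read(&pkt_count);

	/* Reset only if nobody touched the counter since the read;
	 * rte_atomic64_cmpset() returns non-zero on success. */
	if (rte_atomic64_cmpset((volatile uint64_t *)&pkt_count.cnt,
				(uint64_t)snapshot, 0) == 0)
		printf("counter changed concurrently, reset skipped\n");
	return snapshot;
}

The cast through the public cnt field mirrors what rte_atomic64_test_and_set() itself does in the patch.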
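
For readers without a DPDK tree at hand, here is a standalone sketch (again editor-added, not from the patch) of the GCC __sync builtins that the RTE_FORCE_INTRINSICS paths rely on: __sync_fetch_and_add() as used by rte_atomic64_add(), and __sync_bool_compare_and_swap() as used by rte_atomic64_cmpset() and hence rte_atomic64_test_and_set(). Build with gcc -pthread.

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

static volatile int64_t counter;  /* plays the role of rte_atomic64_t.cnt */
static volatile uint64_t flag;    /* 0/1 flag, as in rte_atomic64_test_and_set() */

static void *
worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 100000; i++)
		__sync_fetch_and_add(&counter, 1);  /* like rte_atomic64_inc() */

	/* CAS 0 -> 1: only the first thread to get here succeeds, which is
	 * the semantic documented for rte_atomic64_test_and_set(). */
	if (__sync_bool_compare_and_swap(&flag, 0, 1))
		printf("this thread set the flag first\n");
	return NULL;
}

int
main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* Both increments are atomic, so the sum is exact. */
	printf("counter = %lld (expected 200000)\n", (long long)counter);
	return 0;
}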