diff --git a/lib/librte_eal/common/include/generic/rte_atomic.h b/lib/librte_eal/common/include/generic/rte_atomic.h
index 58c40489bf..e6ab15a973 100644
--- a/lib/librte_eal/common/include/generic/rte_atomic.h
+++ b/lib/librte_eal/common/include/generic/rte_atomic.h
@@ -25,7 +25,6 @@
  *
  * Guarantees that the LOAD and STORE operations generated before the
  * barrier occur before the LOAD and STORE operations generated after.
- * This function is architecture dependent.
  */
 static inline void rte_mb(void);
 
@@ -34,7 +33,6 @@ static inline void rte_mb(void);
  *
  * Guarantees that the STORE operations generated before the barrier
  * occur before the STORE operations generated after.
- * This function is architecture dependent.
  */
 static inline void rte_wmb(void);
 
@@ -43,7 +41,6 @@ static inline void rte_wmb(void);
  *
  * Guarantees that the LOAD operations generated before the barrier
  * occur before the LOAD operations generated after.
- * This function is architecture dependent.
  */
 static inline void rte_rmb(void);
 ///@}
@@ -110,6 +107,45 @@ static inline void rte_io_wmb(void);
 static inline void rte_io_rmb(void);
 ///@}
 
+/** @name Coherent I/O Memory Barrier
+ *
+ * Coherent I/O memory barriers are lightweight versions of the I/O
+ * memory barriers, which are system-wide data synchronization
+ * barriers. They apply only to the coherent memory domain shared
+ * between an lcore and an I/O device; on most architectures they are
+ * identical to the I/O memory barriers. Some architectures, however,
+ * provide even lighter barriers that sit between I/O memory barriers
+ * and SMP memory barriers. On ARMv8, for example, the DMB (data
+ * memory barrier) instruction can target different shareability
+ * domains: an inner-shareable DMB serves as an SMP memory barrier,
+ * while an outer-shareable DMB, which acts on coherent memory,
+ * serves as a coherent I/O memory barrier.
+ *
+ * I/O memory barriers are the safer choice, but when the operations
+ * target coherent memory rather than an incoherent MMIO region of a
+ * device, coherent I/O memory barriers can be used instead and may
+ * improve performance on some architectures.
+ */
+///@{
+/**
+ * Write memory barrier for coherent memory between lcore and I/O device
+ *
+ * Guarantees that the STORE operations on coherent memory that
+ * precede the rte_cio_wmb() call are visible to the I/O device before
+ * the STORE operations that follow it.
+ */
+static inline void rte_cio_wmb(void);
+
+/**
+ * Read memory barrier for coherent memory between lcore and I/O device
+ *
+ * Guarantees that the LOAD operations on coherent memory updated by
+ * the I/O device that precede the rte_cio_rmb() call are visible to
+ * the CPU before the LOAD operations that follow it.
+ */
+static inline void rte_cio_rmb(void);
+///@}
+
 #endif /* __DOXYGEN__ */
 
 /**
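
A minimal usage sketch for the new barriers, assuming a hypothetical descriptor ring in DMA-coherent memory that the device polls; the struct, its fields, and post_desc() are illustrative and not part of this patch:

    #include <stdint.h>
    #include <rte_atomic.h>

    /* Hypothetical descriptor in DMA-coherent memory, polled by the device. */
    struct desc {
            uint64_t buf_addr;      /* buffer I/O address */
            uint32_t len;           /* buffer length */
            uint32_t valid;         /* ownership flag the device polls */
    };

    static void
    post_desc(volatile struct desc *d, uint64_t addr, uint32_t len)
    {
            d->buf_addr = addr;
            d->len = len;

            /*
             * All stores here target coherent memory, so the lighter
             * rte_cio_wmb() is sufficient (instead of rte_io_wmb()) to
             * make the buffer fields visible to the device before the
             * ownership flag flips.
             */
            rte_cio_wmb();
            d->valid = 1;
    }
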
@@ -151,6 +187,36 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
 }
 #endif
 
+/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ *   ret = *dst;
+ *   *dst = val;
+ *   return ret;
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param val
+ *   The new value.
+ * @return
+ *   The original value at that location.
+ */
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+#if defined(__clang__)
+	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+	return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+#endif
+
 /**
  * The atomic counter structure.
  */
@@ -404,6 +470,36 @@ rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
 }
 #endif
 
+/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ *   ret = *dst;
+ *   *dst = val;
+ *   return ret;
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param val
+ *   The new value.
+ * @return
+ *   The original value at that location.
+ */
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+#if defined(__clang__)
+	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+	return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+#endif
+
 /**
  * The atomic counter structure.
  */
@@ -656,6 +752,36 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
 }
 #endif
 
+/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ *   ret = *dst;
+ *   *dst = val;
+ *   return ret;
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param val
+ *   The new value.
+ * @return
+ *   The original value at that location.
+ */
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+#if defined(__clang__)
+	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+#endif
+
 /**
  * The atomic counter structure.
  */
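
A minimal usage sketch for the new exchange helpers, here building a test-and-set style busy-wait flag; the flag is illustrative only (real code would normally use rte_spinlock_t), and it assumes an rte_atomic32_exchange() implementation exists for the target architecture:

    #include <stdint.h>
    #include <rte_atomic.h>
    #include <rte_pause.h>

    static volatile uint32_t flag; /* 0 = free, 1 = held */

    /* Spin until the value swapped out was 0, i.e. this lcore took the flag. */
    static void
    flag_take(void)
    {
            while (rte_atomic32_exchange(&flag, 1) != 0)
                    rte_pause();
    }

    static void
    flag_release(void)
    {
            rte_atomic32_exchange(&flag, 0); /* sequentially consistent reset */
    }
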
@@ -953,4 +1079,72 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)
 }
 #endif
 
+/*------------------------ 128 bit atomic operations -------------------------*/
+
+/**
+ * 128-bit integer structure.
+ */
+RTE_STD_C11
+typedef struct {
+	RTE_STD_C11
+	union {
+		uint64_t val[2];
+#ifdef RTE_ARCH_64
+		__extension__ __int128 int128;
+#endif
+	};
+} __rte_aligned(16) rte_int128_t;
+
+#ifdef __DOXYGEN__
+
+/**
+ * An atomic compare and exchange function for 128-bit values.
+ * (Atomically) Equivalent to:
+ * @code
+ *   if (*dst == *exp)
+ *     *dst = *src;
+ *   else
+ *     *exp = *dst;
+ * @endcode
+ *
+ * @note This function is currently available only on the x86-64 and
+ * aarch64 platforms.
+ *
+ * @note The success and failure arguments must be one of the __ATOMIC_* values
+ * defined in the C++11 standard. For details on their behavior, refer to the
+ * standard.
+ *
+ * @param dst
+ *   The destination into which the value will be written.
+ * @param exp
+ *   Pointer to the expected value. If the operation fails, this memory is
+ *   updated with the actual value.
+ * @param src
+ *   Pointer to the new value.
+ * @param weak
+ *   A value of true allows the comparison to spuriously fail and allows the
+ *   'exp' update to occur non-atomically (i.e. a torn read may occur).
+ *   Implementations may ignore this argument and only implement the strong
+ *   variant.
+ * @param success
+ *   If successful, the operation's memory behavior conforms to this (or a
+ *   stronger) model.
+ * @param failure
+ *   If unsuccessful, the operation's memory behavior conforms to this (or a
+ *   stronger) model. This argument cannot be __ATOMIC_RELEASE,
+ *   __ATOMIC_ACQ_REL, or a stronger model than success.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_atomic128_cmp_exchange(rte_int128_t *dst,
+                           rte_int128_t *exp,
+                           const rte_int128_t *src,
+                           unsigned int weak,
+                           int success,
+                           int failure);
+
+#endif /* __DOXYGEN__ */
+
 #endif /* _RTE_ATOMIC_H_ */
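
A minimal usage sketch for the new 128-bit primitive, assuming a hypothetical {pointer, tag} pair updated as a single unit to defeat ABA; the names are illustrative, an x86-64 or aarch64 target is required per the note above, and ALLOW_EXPERIMENTAL_API must be defined since the function is experimental:

    #include <stdint.h>
    #include <rte_atomic.h>

    /* Hypothetical list head: val[0] holds pointer bits, val[1] an ABA tag. */
    static rte_int128_t head;

    static void
    head_replace(uint64_t new_ptr)
    {
            rte_int128_t old = head; /* plain snapshot; repaired on failure */
            rte_int128_t upd;

            do {
                    upd.val[0] = new_ptr;
                    upd.val[1] = old.val[1] + 1; /* bump tag on every update */
                    /* weak=1: spurious failure is fine inside a retry loop */
            } while (!rte_atomic128_cmp_exchange(&head, &old, &upd, 1,
                                                 __ATOMIC_ACQ_REL,
                                                 __ATOMIC_RELAXED));
    }
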