/**
* General memory barrier.
*
* Guarantees that the LOAD and STORE operations generated before the
* barrier occur before the LOAD and STORE operations generated after.
- * This function is architecture dependent.
*/
static inline void rte_mb(void);

/**
* Write memory barrier.
*
* Guarantees that the STORE operations generated before the barrier
* occur before the STORE operations generated after.
- * This function is architecture dependent.
*/
static inline void rte_wmb(void);

/**
* Read memory barrier.
*
* Guarantees that the LOAD operations generated before the barrier
* occur before the LOAD operations generated after.
- * This function is architecture dependent.
*/
static inline void rte_rmb(void);
///@}
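
/*
 * Illustrative sketch (not part of the header): a typical producer/consumer
 * pairing of the write and read barriers declared above. The variables
 * "data" and "ready" and both helper functions are hypothetical.
 */
static volatile uint32_t data;
static volatile uint32_t ready;

static inline void
example_publish(uint32_t value)
{
	data = value;
	rte_wmb();	/* the data store must be visible before the flag store */
	ready = 1;
}

static inline uint32_t
example_consume(void)
{
	while (ready == 0)
		;
	rte_rmb();	/* the flag load must complete before the data load */
	return data;
}
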
static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
{
-#if defined(RTE_ARCH_ARM64) && defined(__clang__)
+#if defined(__clang__)
	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
#else
	return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
#endif
}
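
/*
 * Note (explanatory, not part of the original header): the 16-, 32- and
 * 64-bit exchange helpers here share one pattern. Clang builds take the
 * generic __atomic_exchange_n() built-in, other compilers the size-suffixed
 * __atomic_exchange_2/_4/_8 forms; the patch widens the clang branch from
 * "ARM64 with clang" to "any clang build", presumably because the
 * size-suffixed built-ins are not reliably available under clang on other
 * architectures either.
 */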
static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
{
-#if defined(RTE_ARCH_ARM64) && defined(__clang__)
+#if defined(__clang__)
	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
#else
	return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
#endif
}
static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
-#if defined(RTE_ARCH_ARM64) && defined(__clang__)
+#if defined(__clang__)
	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
#else
	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
#endif
}
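
/*
 * Illustrative sketch (not part of the header): using the 32-bit exchange
 * helper above as a minimal test-and-set style flag. The name "busy_flag"
 * and both helper functions are hypothetical.
 */
static volatile uint32_t busy_flag;	/* 0 = free, 1 = held */

static inline void
example_acquire(void)
{
	/* Atomically swap 1 in; retry while the previous value was already 1. */
	while (rte_atomic32_exchange(&busy_flag, 1) != 0)
		;
}

static inline void
example_release(void)
{
	/* Swap 0 back in; the returned old value (1) is discarded. */
	(void)rte_atomic32_exchange(&busy_flag, 0);
}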