1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
3 * Copyright(c) 2019 Arm Limited
12 * CPU pause operation.
18 #include <rte_common.h>
19 #include <rte_atomic.h>
20 #include <rte_compat.h>
/**
 * Pause CPU execution for a short while
 *
 * This call is intended for tight loops which poll a shared resource or wait
 * for an event. A short pause within the loop may reduce the power consumption.
 */
static inline void rte_pause(void);
32 * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
34 * Wait for *addr to be updated with a 16-bit expected value, with a relaxed
35 * memory ordering model meaning the loads around this API can be reordered.
38 * A pointer to the memory location.
40 * A 16-bit expected value to be in the memory location.
42 * Two different memory orders that can be specified:
43 * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
44 * C++11 memory orders with the same names, see the C++11 standard or
45 * the GCC wiki on atomic synchronization for detailed definition.
48 static __rte_always_inline void
49 rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
54 * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
56 * Wait for *addr to be updated with a 32-bit expected value, with a relaxed
57 * memory ordering model meaning the loads around this API can be reordered.
60 * A pointer to the memory location.
62 * A 32-bit expected value to be in the memory location.
64 * Two different memory orders that can be specified:
65 * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
66 * C++11 memory orders with the same names, see the C++11 standard or
67 * the GCC wiki on atomic synchronization for detailed definition.
70 static __rte_always_inline void
71 rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
76 * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
78 * Wait for *addr to be updated with a 64-bit expected value, with a relaxed
79 * memory ordering model meaning the loads around this API can be reordered.
82 * A pointer to the memory location.
84 * A 64-bit expected value to be in the memory location.
86 * Two different memory orders that can be specified:
87 * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
88 * C++11 memory orders with the same names, see the C++11 standard or
89 * the GCC wiki on atomic synchronization for detailed definition.
92 static __rte_always_inline void
93 rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
96 #ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
97 static __rte_always_inline void
98 rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
101 assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
103 while (__atomic_load_n(addr, memorder) != expected)
107 static __rte_always_inline void
108 rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
111 assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
113 while (__atomic_load_n(addr, memorder) != expected)
117 static __rte_always_inline void
118 rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
121 assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
123 while (__atomic_load_n(addr, memorder) != expected)
128 #endif /* _RTE_PAUSE_H_ */