1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
5 #ifndef _RTE_SPINLOCK_H_
6 #define _RTE_SPINLOCK_H_
 * This file defines an API for spinlocks, which are implemented
 * in an architecture-specific way. This kind of lock simply waits in
 * a loop, repeatedly checking until the lock becomes available.
17 * All locks must be initialised before use, and only initialised once.
21 #include <rte_lcore.h>
22 #ifdef RTE_FORCE_INTRINSICS
23 #include <rte_common.h>
25 #include <rte_pause.h>
28 * The rte_spinlock_t type.
typedef struct {
	volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
} rte_spinlock_t;
35 * A static spinlock initializer.
37 #define RTE_SPINLOCK_INITIALIZER { 0 }
40 * Initialize the spinlock to an unlocked state.
43 * A pointer to the spinlock.
46 rte_spinlock_init(rte_spinlock_t *sl)
55 * A pointer to the spinlock.
58 rte_spinlock_lock(rte_spinlock_t *sl);
60 #ifdef RTE_FORCE_INTRINSICS
62 rte_spinlock_lock(rte_spinlock_t *sl)
66 while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
67 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
68 rte_wait_until_equal_32((volatile uint32_t *)&sl->locked,
76 * Release the spinlock.
79 * A pointer to the spinlock.
82 rte_spinlock_unlock (rte_spinlock_t *sl);
84 #ifdef RTE_FORCE_INTRINSICS
86 rte_spinlock_unlock (rte_spinlock_t *sl)
88 __atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
93 * Try to take the lock.
96 * A pointer to the spinlock.
98 * 1 if the lock is successfully taken; 0 otherwise.
100 __rte_warn_unused_result
102 rte_spinlock_trylock (rte_spinlock_t *sl);
104 #ifdef RTE_FORCE_INTRINSICS
106 rte_spinlock_trylock (rte_spinlock_t *sl)
109 return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
110 0, /* disallow spurious failure */
111 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
116 * Test if the lock is taken.
119 * A pointer to the spinlock.
121 * 1 if the lock is currently taken; 0 otherwise.
123 static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
125 return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
129 * Test if hardware transactional memory (lock elision) is supported
132 * 1 if the hardware transactional memory is supported; 0 otherwise.
134 static inline int rte_tm_supported(void);
137 * Try to execute critical section in a hardware memory transaction,
138 * if it fails or not available take the spinlock.
140 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
141 * transaction always aborts the transaction since the CPU is not able to
142 * roll-back should the transaction fail. Therefore, hardware transactional
143 * locks are not advised to be used around rte_eth_rx_burst() and
144 * rte_eth_tx_burst() calls.
147 * A pointer to the spinlock.
150 rte_spinlock_lock_tm(rte_spinlock_t *sl);
153 * Commit hardware memory transaction or release the spinlock if
154 * the spinlock is used as a fall-back
157 * A pointer to the spinlock.
160 rte_spinlock_unlock_tm(rte_spinlock_t *sl);
163 * Try to execute critical section in a hardware memory transaction,
164 * if it fails or not available try to take the lock.
166 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
167 * transaction always aborts the transaction since the CPU is not able to
168 * roll-back should the transaction fail. Therefore, hardware transactional
169 * locks are not advised to be used around rte_eth_rx_burst() and
170 * rte_eth_tx_burst() calls.
173 * A pointer to the spinlock.
175 * 1 if the hardware memory transaction is successfully started
176 * or lock is successfully taken; 0 otherwise.
178 __rte_warn_unused_result
180 rte_spinlock_trylock_tm(rte_spinlock_t *sl);
183 * The rte_spinlock_recursive_t type.
186 rte_spinlock_t sl; /**< the actual spinlock */
187 volatile int user; /**< core id using lock, -1 for unused */
188 volatile int count; /**< count of time this lock has been called */
189 } rte_spinlock_recursive_t;
192 * A static recursive spinlock initializer.
194 #define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}
197 * Initialize the recursive spinlock to an unlocked state.
200 * A pointer to the recursive spinlock.
202 static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
204 rte_spinlock_init(&slr->sl);
210 * Take the recursive spinlock.
213 * A pointer to the recursive spinlock.
215 static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
217 int id = rte_gettid();
219 if (slr->user != id) {
220 rte_spinlock_lock(&slr->sl);
226 * Release the recursive spinlock.
229 * A pointer to the recursive spinlock.
231 static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
233 if (--(slr->count) == 0) {
235 rte_spinlock_unlock(&slr->sl);
241 * Try to take the recursive lock.
244 * A pointer to the recursive spinlock.
246 * 1 if the lock is successfully taken; 0 otherwise.
248 __rte_warn_unused_result
249 static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
251 int id = rte_gettid();
253 if (slr->user != id) {
254 if (rte_spinlock_trylock(&slr->sl) == 0)
264 * Try to execute critical section in a hardware memory transaction,
265 * if it fails or not available take the recursive spinlocks
267 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
268 * transaction always aborts the transaction since the CPU is not able to
269 * roll-back should the transaction fail. Therefore, hardware transactional
270 * locks are not advised to be used around rte_eth_rx_burst() and
271 * rte_eth_tx_burst() calls.
274 * A pointer to the recursive spinlock.
276 static inline void rte_spinlock_recursive_lock_tm(
277 rte_spinlock_recursive_t *slr);
280 * Commit hardware memory transaction or release the recursive spinlock
281 * if the recursive spinlock is used as a fall-back
284 * A pointer to the recursive spinlock.
286 static inline void rte_spinlock_recursive_unlock_tm(
287 rte_spinlock_recursive_t *slr);
290 * Try to execute critical section in a hardware memory transaction,
291 * if it fails or not available try to take the recursive lock
293 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
294 * transaction always aborts the transaction since the CPU is not able to
295 * roll-back should the transaction fail. Therefore, hardware transactional
296 * locks are not advised to be used around rte_eth_rx_burst() and
297 * rte_eth_tx_burst() calls.
300 * A pointer to the recursive spinlock.
302 * 1 if the hardware memory transaction is successfully started
303 * or lock is successfully taken; 0 otherwise.
305 __rte_warn_unused_result
306 static inline int rte_spinlock_recursive_trylock_tm(
307 rte_spinlock_recursive_t *slr);
309 #endif /* _RTE_SPINLOCK_H_ */