/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_SPINLOCK_H_
#define _RTE_SPINLOCK_H_

/**
 * @file
 *
 * RTE Spinlocks
 *
 * This file defines an API for spinlocks, which are implemented in an
 * architecture-specific way. This kind of lock simply waits in a loop,
 * repeatedly checking until the lock becomes available.
 *
 * All locks must be initialised before use, and only initialised once.
 */

#include <rte_lcore.h>
#ifdef RTE_FORCE_INTRINSICS
#include <rte_common.h>
#endif
#include <rte_pause.h>

/**
 * The rte_spinlock_t type.
 */
typedef struct {
	volatile int locked; /**< lock status: 0 = unlocked, 1 = locked */
} rte_spinlock_t;

/**
 * A static spinlock initializer.
 */
#define RTE_SPINLOCK_INITIALIZER { 0 }

/**
 * Initialize the spinlock to an unlocked state.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_init(rte_spinlock_t *sl)
{
	sl->locked = 0;
}

/**
 * Take the spinlock.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_lock(rte_spinlock_t *sl);

#ifdef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
	int exp = 0;

	while (!__atomic_compare_exchange_n(&sl->locked, &exp, 1, 0,
				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		/* Test-and-test-and-set: spin on a relaxed load until the
		 * lock looks free, then retry the acquiring CAS.
		 */
		while (__atomic_load_n(&sl->locked, __ATOMIC_RELAXED))
			rte_pause();
		exp = 0;
	}
}
#endif

/**
 * Release the spinlock.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl);

#ifdef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
{
	__atomic_store_n(&sl->locked, 0, __ATOMIC_RELEASE);
}
#endif

/**
 * Try to take the lock.
 *
 * @param sl
 *   A pointer to the spinlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl);

#ifdef RTE_FORCE_INTRINSICS
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
{
	int exp = 0;

	return __atomic_compare_exchange_n(&sl->locked, &exp, 1,
				0, /* disallow spurious failure */
				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
#endif

/**
 * Test if the lock is taken.
 *
 * @param sl
 *   A pointer to the spinlock.
 * @return
 *   1 if the lock is currently taken; 0 otherwise.
 */
static inline int rte_spinlock_is_locked(rte_spinlock_t *sl)
{
	return __atomic_load_n(&sl->locked, __ATOMIC_ACQUIRE);
}
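
/*
 * Usage sketch (illustrative only, not part of the API; "stats_lock" and
 * "stats_counter" are hypothetical names): a statically initialized spinlock
 * protecting a shared counter. rte_spinlock_trylock() returns 0 when the lock
 * is busy, letting the caller back off instead of spinning.
 *
 *	static rte_spinlock_t stats_lock = RTE_SPINLOCK_INITIALIZER;
 *	static uint64_t stats_counter;
 *
 *	static void stats_increment(void)
 *	{
 *		rte_spinlock_lock(&stats_lock);
 *		stats_counter++;
 *		rte_spinlock_unlock(&stats_lock);
 *	}
 *
 *	static int stats_try_increment(void)
 *	{
 *		if (rte_spinlock_trylock(&stats_lock) == 0)
 *			return 0;
 *		stats_counter++;
 *		rte_spinlock_unlock(&stats_lock);
 *		return 1;
 *	}
 */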

/**
 * Test if hardware transactional memory (lock elision) is supported.
 *
 * @return
 *   1 if hardware transactional memory is supported; 0 otherwise.
 */
static inline int rte_tm_supported(void);

/**
 * Try to execute the critical section in a hardware memory transaction;
 * if that fails or is not available, take the spinlock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction since the CPU is not able to
 * roll back should the transaction fail. Therefore, it is not advisable to
 * use hardware transactional locks around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl);

/**
 * Commit the hardware memory transaction, or release the spinlock if the
 * spinlock was used as a fall-back.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl);

/**
 * Try to execute the critical section in a hardware memory transaction;
 * if that fails or is not available, try to take the lock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction since the CPU is not able to
 * roll back should the transaction fail. Therefore, it is not advisable to
 * use hardware transactional locks around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param sl
 *   A pointer to the spinlock.
 * @return
 *   1 if the hardware memory transaction is successfully started
 *   or the lock is successfully taken; 0 otherwise.
 */
static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl);
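
/*
 * Usage sketch (illustrative; "table_lock" and "table" are hypothetical
 * names): the _tm variants are drop-in replacements for the plain lock and
 * unlock calls and fall back to the spinlock when lock elision is unavailable
 * or the transaction aborts; rte_tm_supported() merely reports whether
 * elision can be used at all. Per the NOTE above, the critical section should
 * touch memory only, with no I/O such as rte_eth_rx_burst()/rte_eth_tx_burst().
 *
 *	static rte_spinlock_t table_lock = RTE_SPINLOCK_INITIALIZER;
 *	static int table[64];
 *
 *	static void table_set(unsigned int idx, int val)
 *	{
 *		rte_spinlock_lock_tm(&table_lock);
 *		table[idx & 63] = val;
 *		rte_spinlock_unlock_tm(&table_lock);
 *	}
 */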

/**
 * The rte_spinlock_recursive_t type.
 */
typedef struct {
	rte_spinlock_t sl; /**< the actual spinlock */
	volatile int user; /**< ID of the thread holding the lock, -1 for unused */
	volatile int count; /**< count of times this lock has been taken */
} rte_spinlock_recursive_t;

/**
 * A static recursive spinlock initializer.
 */
#define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}

/**
 * Initialize the recursive spinlock to an unlocked state.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
{
	rte_spinlock_init(&slr->sl);
	slr->user = -1;
	slr->count = 0;
}

/**
 * Take the recursive spinlock.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
{
	int id = rte_gettid();

	if (slr->user != id) {
		rte_spinlock_lock(&slr->sl);
		slr->user = id;
	}
	slr->count++;
}

/**
 * Release the recursive spinlock.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
{
	if (--(slr->count) == 0) {
		slr->user = -1;
		rte_spinlock_unlock(&slr->sl);
	}
}

/**
 * Try to take the recursive lock.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
{
	int id = rte_gettid();

	if (slr->user != id) {
		if (rte_spinlock_trylock(&slr->sl) == 0)
			return 0;
		slr->user = id;
	}
	slr->count++;
	return 1;
}
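
/*
 * Usage sketch (illustrative; "cfg_lock", "cfg_set" and "cfg_reset" are
 * hypothetical names): a recursive spinlock lets the thread that already
 * holds the lock take it again, e.g. when one locked helper calls another,
 * as cfg_reset() does below without deadlocking. Each lock call must still
 * be balanced by an unlock call, since only the final unlock releases the
 * underlying spinlock.
 *
 *	static rte_spinlock_recursive_t cfg_lock =
 *		RTE_SPINLOCK_RECURSIVE_INITIALIZER;
 *	static int cfg[8];
 *
 *	static void cfg_set(int key, int val)
 *	{
 *		rte_spinlock_recursive_lock(&cfg_lock);
 *		cfg[key & 7] = val;
 *		rte_spinlock_recursive_unlock(&cfg_lock);
 *	}
 *
 *	static void cfg_reset(void)
 *	{
 *		int i;
 *
 *		rte_spinlock_recursive_lock(&cfg_lock);
 *		for (i = 0; i < 8; i++)
 *			cfg_set(i, 0);
 *		rte_spinlock_recursive_unlock(&cfg_lock);
 *	}
 */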

/**
 * Try to execute the critical section in a hardware memory transaction;
 * if that fails or is not available, take the recursive spinlock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction since the CPU is not able to
 * roll back should the transaction fail. Therefore, it is not advisable to
 * use hardware transactional locks around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_lock_tm(
	rte_spinlock_recursive_t *slr);

/**
 * Commit the hardware memory transaction, or release the recursive spinlock
 * if the recursive spinlock was used as a fall-back.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_unlock_tm(
	rte_spinlock_recursive_t *slr);

/**
 * Try to execute the critical section in a hardware memory transaction;
 * if that fails or is not available, try to take the recursive lock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction since the CPU is not able to
 * roll back should the transaction fail. Therefore, it is not advisable to
 * use hardware transactional locks around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 * @return
 *   1 if the hardware memory transaction is successfully started
 *   or the lock is successfully taken; 0 otherwise.
 */
static inline int rte_spinlock_recursive_trylock_tm(
	rte_spinlock_recursive_t *slr);
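
/*
 * Usage sketch (illustrative; "dev_lock" and "dev_state" are hypothetical
 * names): the recursive _tm variants follow the same pattern as the plain
 * recursive calls shown above, adding lock elision where the hardware
 * supports it and falling back to the recursive spinlock otherwise. The same
 * NOTE applies: keep the critical section free of I/O.
 *
 *	static rte_spinlock_recursive_t dev_lock =
 *		RTE_SPINLOCK_RECURSIVE_INITIALIZER;
 *	static int dev_state;
 *
 *	static void dev_set_state(int state)
 *	{
 *		rte_spinlock_recursive_lock_tm(&dev_lock);
 *		dev_state = state;
 *		rte_spinlock_recursive_unlock_tm(&dev_lock);
 *	}
 */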

#endif /* _RTE_SPINLOCK_H_ */