4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #ifndef _RTE_SPINLOCK_H_
35 #define _RTE_SPINLOCK_H_
/**
 * @file
 *
 * RTE Spinlocks
 *
 * This file defines an API for spin locks, which are implemented
 * in an architecture-specific way. This kind of lock simply waits in
 * a loop repeatedly checking until the lock becomes available.
 *
 * All locks must be initialised before use, and only initialised once.
 */
50 #include <rte_lcore.h>
51 #ifdef RTE_FORCE_INTRINSICS
52 #include <rte_common.h>
54 #include <rte_pause.h>
/**
 * The rte_spinlock_t type.
 */
typedef struct {
	volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
} rte_spinlock_t;

/**
 * A static spinlock initializer.
 */
#define RTE_SPINLOCK_INITIALIZER { 0 }
69 * Initialize the spinlock to an unlocked state.
72 * A pointer to the spinlock.
75 rte_spinlock_init(rte_spinlock_t *sl)
84 * A pointer to the spinlock.
87 rte_spinlock_lock(rte_spinlock_t *sl);
89 #ifdef RTE_FORCE_INTRINSICS
91 rte_spinlock_lock(rte_spinlock_t *sl)
93 while (__sync_lock_test_and_set(&sl->locked, 1))
100 * Release the spinlock.
103 * A pointer to the spinlock.
106 rte_spinlock_unlock (rte_spinlock_t *sl);
108 #ifdef RTE_FORCE_INTRINSICS
110 rte_spinlock_unlock (rte_spinlock_t *sl)
112 __sync_lock_release(&sl->locked);
117 * Try to take the lock.
120 * A pointer to the spinlock.
122 * 1 if the lock is successfully taken; 0 otherwise.
125 rte_spinlock_trylock (rte_spinlock_t *sl);
127 #ifdef RTE_FORCE_INTRINSICS
129 rte_spinlock_trylock (rte_spinlock_t *sl)
131 return __sync_lock_test_and_set(&sl->locked,1) == 0;
136 * Test if the lock is taken.
139 * A pointer to the spinlock.
141 * 1 if the lock is currently taken; 0 otherwise.
143 static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
/**
 * Test if hardware transactional memory (lock elision) is supported
 *
 * @return
 *   1 if the hardware transactional memory is supported; 0 otherwise.
 */
static inline int rte_tm_supported(void);
157 * Try to execute critical section in a hardware memory transaction,
158 * if it fails or not available take the spinlock.
160 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
161 * transaction always aborts the transaction since the CPU is not able to
162 * roll-back should the transaction fail. Therefore, hardware transactional
163 * locks are not advised to be used around rte_eth_rx_burst() and
164 * rte_eth_tx_burst() calls.
167 * A pointer to the spinlock.
170 rte_spinlock_lock_tm(rte_spinlock_t *sl);
173 * Commit hardware memory transaction or release the spinlock if
174 * the spinlock is used as a fall-back
177 * A pointer to the spinlock.
180 rte_spinlock_unlock_tm(rte_spinlock_t *sl);
183 * Try to execute critical section in a hardware memory transaction,
184 * if it fails or not available try to take the lock.
186 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
187 * transaction always aborts the transaction since the CPU is not able to
188 * roll-back should the transaction fail. Therefore, hardware transactional
189 * locks are not advised to be used around rte_eth_rx_burst() and
190 * rte_eth_tx_burst() calls.
193 * A pointer to the spinlock.
195 * 1 if the hardware memory transaction is successfully started
196 * or lock is successfully taken; 0 otherwise.
199 rte_spinlock_trylock_tm(rte_spinlock_t *sl);
202 * The rte_spinlock_recursive_t type.
205 rte_spinlock_t sl; /**< the actual spinlock */
206 volatile int user; /**< core id using lock, -1 for unused */
207 volatile int count; /**< count of time this lock has been called */
208 } rte_spinlock_recursive_t;
211 * A static recursive spinlock initializer.
213 #define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}
216 * Initialize the recursive spinlock to an unlocked state.
219 * A pointer to the recursive spinlock.
221 static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
223 rte_spinlock_init(&slr->sl);
229 * Take the recursive spinlock.
232 * A pointer to the recursive spinlock.
234 static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
236 int id = rte_gettid();
238 if (slr->user != id) {
239 rte_spinlock_lock(&slr->sl);
245 * Release the recursive spinlock.
248 * A pointer to the recursive spinlock.
250 static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
252 if (--(slr->count) == 0) {
254 rte_spinlock_unlock(&slr->sl);
260 * Try to take the recursive lock.
263 * A pointer to the recursive spinlock.
265 * 1 if the lock is successfully taken; 0 otherwise.
267 static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
269 int id = rte_gettid();
271 if (slr->user != id) {
272 if (rte_spinlock_trylock(&slr->sl) == 0)
282 * Try to execute critical section in a hardware memory transaction,
283 * if it fails or not available take the recursive spinlocks
285 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
286 * transaction always aborts the transaction since the CPU is not able to
287 * roll-back should the transaction fail. Therefore, hardware transactional
288 * locks are not advised to be used around rte_eth_rx_burst() and
289 * rte_eth_tx_burst() calls.
292 * A pointer to the recursive spinlock.
294 static inline void rte_spinlock_recursive_lock_tm(
295 rte_spinlock_recursive_t *slr);
298 * Commit hardware memory transaction or release the recursive spinlock
299 * if the recursive spinlock is used as a fall-back
302 * A pointer to the recursive spinlock.
304 static inline void rte_spinlock_recursive_unlock_tm(
305 rte_spinlock_recursive_t *slr);
308 * Try to execute critical section in a hardware memory transaction,
309 * if it fails or not available try to take the recursive lock
311 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
312 * transaction always aborts the transaction since the CPU is not able to
313 * roll-back should the transaction fail. Therefore, hardware transactional
314 * locks are not advised to be used around rte_eth_rx_burst() and
315 * rte_eth_tx_burst() calls.
318 * A pointer to the recursive spinlock.
320 * 1 if the hardware memory transaction is successfully started
321 * or lock is successfully taken; 0 otherwise.
323 static inline int rte_spinlock_recursive_trylock_tm(
324 rte_spinlock_recursive_t *slr);
326 #endif /* _RTE_SPINLOCK_H_ */