4 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #ifndef _RTE_SPINLOCK_H_
36 #define _RTE_SPINLOCK_H_
 * This file defines an API for spinlocks, which are implemented
 * in an architecture-specific way. This kind of lock simply waits in
 * a loop, repeatedly checking until the lock becomes available.
47 * All locks must be initialised before use, and only initialised once.
55 #include <rte_lcore.h>
56 #ifdef RTE_FORCE_INTRINSICS
57 #include <rte_common.h>
/**
 * The rte_spinlock_t type.
 */
typedef struct {
	volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
} rte_spinlock_t;

/**
 * A static spinlock initializer.
 */
#define RTE_SPINLOCK_INITIALIZER { 0 }
73 * Initialize the spinlock to an unlocked state.
76 * A pointer to the spinlock.
79 rte_spinlock_init(rte_spinlock_t *sl)
88 * A pointer to the spinlock.
91 rte_spinlock_lock(rte_spinlock_t *sl)
93 #ifndef RTE_FORCE_INTRINSICS
97 "xchg %[locked], %[lv]\n"
102 "cmp $0, %[locked]\n"
106 : [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
110 while (__sync_lock_test_and_set(&sl->locked, 1))
117 * Release the spinlock.
120 * A pointer to the spinlock.
123 rte_spinlock_unlock (rte_spinlock_t *sl)
125 #ifndef RTE_FORCE_INTRINSICS
128 "xchg %[locked], %[ulv]\n"
129 : [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
130 : "[ulv]" (unlock_val)
133 __sync_lock_release(&sl->locked);
138 * Try to take the lock.
141 * A pointer to the spinlock.
143 * 1 if the lock is successfully taken; 0 otherwise.
146 rte_spinlock_trylock (rte_spinlock_t *sl)
148 #ifndef RTE_FORCE_INTRINSICS
152 "xchg %[locked], %[lockval]"
153 : [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
154 : "[lockval]" (lockval)
157 return (lockval == 0);
159 return (__sync_lock_test_and_set(&sl->locked,1) == 0);
164 * Test if the lock is taken.
167 * A pointer to the spinlock.
169 * 1 if the lock is currently taken; 0 otherwise.
171 static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
177 * The rte_spinlock_recursive_t type.
180 rte_spinlock_t sl; /**< the actual spinlock */
181 volatile int user; /**< core id using lock, -1 for unused */
182 volatile int count; /**< count of time this lock has been called */
183 } rte_spinlock_recursive_t;
186 * A static recursive spinlock initializer.
188 #define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}
191 * Initialize the recursive spinlock to an unlocked state.
194 * A pointer to the recursive spinlock.
196 static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
198 rte_spinlock_init(&slr->sl);
204 * Take the recursive spinlock.
207 * A pointer to the recursive spinlock.
209 static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
211 int id = rte_lcore_id();
213 if (slr->user != id) {
214 rte_spinlock_lock(&slr->sl);
220 * Release the recursive spinlock.
223 * A pointer to the recursive spinlock.
225 static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
227 if (--(slr->count) == 0) {
229 rte_spinlock_unlock(&slr->sl);
235 * Try to take the recursive lock.
238 * A pointer to the recursive spinlock.
240 * 1 if the lock is successfully taken; 0 otherwise.
242 static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
244 int id = rte_lcore_id();
246 if (slr->user != id) {
247 if (rte_spinlock_trylock(&slr->sl) == 0)
259 #endif /* _RTE_SPINLOCK_H_ */