4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #ifndef _RTE_SPINLOCK_H_
35 #define _RTE_SPINLOCK_H_
42 * This file defines an API for spin locks, which are implemented
43 * in an architecture-specific way. This kind of lock simply waits in
44 * a loop repeatedly checking until the lock becomes available.
46 * All locks must be initialised before use, and only initialised once.
54 #include <rte_lcore.h>
55 #ifdef RTE_FORCE_INTRINSICS
56 #include <rte_common.h>
/**
 * The rte_spinlock_t type.
 *
 * A spinlock is a single word of state: it is either held (1) or free (0).
 */
typedef struct {
	volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
} rte_spinlock_t;

/**
 * A static spinlock initializer (lock starts in the unlocked state).
 */
#define RTE_SPINLOCK_INITIALIZER { 0 }
72 * Initialize the spinlock to an unlocked state.
75 * A pointer to the spinlock.
78 rte_spinlock_init(rte_spinlock_t *sl)
87 * A pointer to the spinlock.
90 rte_spinlock_lock(rte_spinlock_t *sl)
92 #ifndef RTE_FORCE_INTRINSICS
96 "xchg %[locked], %[lv]\n"
101 "cmpl $0, %[locked]\n"
105 : [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
109 while (__sync_lock_test_and_set(&sl->locked, 1))
116 * Release the spinlock.
119 * A pointer to the spinlock.
122 rte_spinlock_unlock (rte_spinlock_t *sl)
124 #ifndef RTE_FORCE_INTRINSICS
127 "xchg %[locked], %[ulv]\n"
128 : [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
129 : "[ulv]" (unlock_val)
132 __sync_lock_release(&sl->locked);
137 * Try to take the lock.
140 * A pointer to the spinlock.
142 * 1 if the lock is successfully taken; 0 otherwise.
145 rte_spinlock_trylock (rte_spinlock_t *sl)
147 #ifndef RTE_FORCE_INTRINSICS
151 "xchg %[locked], %[lockval]"
152 : [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
153 : "[lockval]" (lockval)
156 return (lockval == 0);
158 return (__sync_lock_test_and_set(&sl->locked,1) == 0);
163 * Test if the lock is taken.
166 * A pointer to the spinlock.
168 * 1 if the lock is currently taken; 0 otherwise.
170 static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
176 * The rte_spinlock_recursive_t type.
179 rte_spinlock_t sl; /**< the actual spinlock */
180 volatile int user; /**< core id using lock, -1 for unused */
181 volatile int count; /**< count of time this lock has been called */
182 } rte_spinlock_recursive_t;
185 * A static recursive spinlock initializer.
187 #define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}
190 * Initialize the recursive spinlock to an unlocked state.
193 * A pointer to the recursive spinlock.
195 static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
197 rte_spinlock_init(&slr->sl);
203 * Take the recursive spinlock.
206 * A pointer to the recursive spinlock.
208 static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
210 int id = rte_lcore_id();
212 if (slr->user != id) {
213 rte_spinlock_lock(&slr->sl);
219 * Release the recursive spinlock.
222 * A pointer to the recursive spinlock.
224 static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
226 if (--(slr->count) == 0) {
228 rte_spinlock_unlock(&slr->sl);
234 * Try to take the recursive lock.
237 * A pointer to the recursive spinlock.
239 * 1 if the lock is successfully taken; 0 otherwise.
241 static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
243 int id = rte_lcore_id();
245 if (slr->user != id) {
246 if (rte_spinlock_trylock(&slr->sl) == 0)
258 #endif /* _RTE_SPINLOCK_H_ */