/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_SPINLOCK_X86_64_H_
#define _RTE_SPINLOCK_X86_64_H_

#include "generic/rte_spinlock.h"
#include "rte_rtm.h"
#include "rte_cpuflags.h"
#include "rte_branch_prediction.h"
#include "rte_common.h"
#define RTE_RTM_MAX_RETRIES (10)
#define RTE_XABORT_LOCK_BUSY (0xff)
#ifndef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
	int lock_val = 1;
	asm volatile (
			"1:\n"
			"xchg %[locked], %[lv]\n"	/* try to take the lock */
			"test %[lv], %[lv]\n"
			"jz 3f\n"			/* got it: done */
			"2:\n"
			"pause\n"			/* busy-wait politely */
			"cmpl $0, %[locked]\n"
			"jnz 2b\n"			/* still held: keep waiting */
			"jmp 1b\n"			/* released: retry the xchg */
			"3:\n"
			: [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
			: "[lv]" (lock_val)
			: "memory");
}
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
{
	int unlock_val = 0;
	asm volatile (
			"xchg %[locked], %[ulv]\n"	/* atomically clear the lock */
			: [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
			: "[ulv]" (unlock_val)
			: "memory");
}
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
{
	int lockval = 1;

	asm volatile (
			"xchg %[locked], %[lockval]"
			: [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
			: "[lockval]" (lockval)
			: "memory");

	return lockval == 0;	/* swapping out a 0 means we took the lock */
}
#endif
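/*
 * Example: a minimal usage sketch of the plain spinlock API above. The
 * lock and counter names are illustrative, not part of this header.
 */
#ifdef RTE_SPINLOCK_EXAMPLE /* illustrative only */
static rte_spinlock_t example_lock = RTE_SPINLOCK_INITIALIZER;
static int example_counter;

static void
example_increment(void)
{
	rte_spinlock_lock(&example_lock);	/* spin until acquired */
	example_counter++;			/* critical section */
	rte_spinlock_unlock(&example_lock);	/* release */
}
#endif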
static uint8_t rtm_supported; /* cache the flag to avoid the overhead
				 of the rte_cpu_get_flag_enabled function */
static inline void __attribute__((constructor))
rte_rtm_init(void)
{
	rtm_supported = rte_cpu_get_flag_enabled(RTE_CPUFLAG_RTM);
}
static inline int rte_tm_supported(void)
{
	return rtm_supported;
}
static inline int
rte_try_tm(volatile int *lock)
{
	if (!rtm_supported)
		return 0;

	int retries = RTE_RTM_MAX_RETRIES;

	while (likely(retries--)) {

		unsigned int status = rte_xbegin();

		if (likely(RTE_XBEGIN_STARTED == status)) {
			/* Transaction started: abort if the lock is already
			 * held, otherwise enter the critical section with
			 * the lock elided. */
			if (unlikely(*lock))
				rte_xabort(RTE_XABORT_LOCK_BUSY);
			else
				return 1;
		}
		/* Transaction aborted: wait until the lock is free before
		 * deciding whether to retry. */
		while (*lock)
			rte_pause();

		if ((status & RTE_XABORT_EXPLICIT) &&
			(RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))
			continue; /* aborted only because the lock was busy */

		if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
			break;
	}
	return 0;
}
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return;

	rte_spinlock_lock(sl); /* fall-back */
}
static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return 1;

	return rte_spinlock_trylock(sl);
}
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
{
	if (unlikely(sl->locked))
		rte_spinlock_unlock(sl);	/* lock was really taken */
	else
		rte_xend();			/* commit the elided transaction */
}
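/*
 * Example: a minimal lock-elision sketch using the *_tm API above (names
 * are illustrative, not part of this header). Every *_lock_tm call must
 * be paired with the matching *_unlock_tm, which either commits the
 * transaction or releases the lock actually taken by the fall-back path.
 */
#ifdef RTE_SPINLOCK_EXAMPLE /* illustrative only */
static rte_spinlock_t example_tm_lock = RTE_SPINLOCK_INITIALIZER;
static int example_tm_counter;

static void
example_tm_increment(void)
{
	rte_spinlock_lock_tm(&example_tm_lock);   /* elide or acquire */
	example_tm_counter++;                     /* critical section */
	rte_spinlock_unlock_tm(&example_tm_lock); /* commit or release */
}
#endif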
static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return;

	rte_spinlock_recursive_lock(slr); /* fall-back */
}
static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
{
	if (unlikely(slr->sl.locked))
		rte_spinlock_recursive_unlock(slr);	/* lock was really taken */
	else
		rte_xend();				/* commit the elided transaction */
}
static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return 1;

	return rte_spinlock_recursive_trylock(slr);
}
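/*
 * Example: a sketch of re-entry on the recursive lock (names are
 * illustrative). The plain recursive calls below are the fall-back path
 * that the *_tm variants above wrap; the same lcore may take the lock
 * again without deadlocking.
 */
#ifdef RTE_SPINLOCK_EXAMPLE /* illustrative only */
static rte_spinlock_recursive_t example_rlock =
	RTE_SPINLOCK_RECURSIVE_INITIALIZER;

static void
example_recursive(void)
{
	rte_spinlock_recursive_lock(&example_rlock);
	rte_spinlock_recursive_lock(&example_rlock);   /* re-entry is safe */
	rte_spinlock_recursive_unlock(&example_rlock);
	rte_spinlock_recursive_unlock(&example_rlock);
}
#endif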
#endif /* _RTE_SPINLOCK_X86_64_H_ */