/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_SPINLOCK_X86_64_H_
#define _RTE_SPINLOCK_X86_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "generic/rte_spinlock.h"
#include "rte_rtm.h"
#include "rte_cpuflags.h"
#include "rte_branch_prediction.h"
#include "rte_common.h"
#include "rte_pause.h"
#include "rte_cycles.h"

/* Maximum number of attempts to elide the lock via RTM before giving up */
#define RTE_RTM_MAX_RETRIES (20)
/* Explicit abort code signalling that the lock is already held */
#define RTE_XABORT_LOCK_BUSY (0xff)

#ifndef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
	int lock_val = 1;
	/* Atomically swap 1 into the lock word; if the old value was
	 * non-zero, pause until the lock looks free, then retry the swap.
	 */
	asm volatile (
			"1:\n"
			"xchg %[locked], %[lv]\n"
			"test %[lv], %[lv]\n"
			"jz 3f\n"
			"2:\n"
			"pause\n"
			"cmpl $0, %[locked]\n"
			"jnz 2b\n"
			"jmp 1b\n"
			"3:\n"
			: [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
			: "[lv]" (lock_val)
			: "memory");
}

static inline void
rte_spinlock_unlock (rte_spinlock_t *sl)
{
	int unlock_val = 0;
	/* Release with an atomic swap of 0 into the lock word */
	asm volatile (
			"xchg %[locked], %[ulv]\n"
			: [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
			: "[ulv]" (unlock_val)
			: "memory");
}

static inline int
rte_spinlock_trylock (rte_spinlock_t *sl)
{
	int lockval = 1;

	/* Single swap attempt: success if the old value was 0 (unlocked) */
	asm volatile (
			"xchg %[locked], %[lockval]"
			: [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
			: "[lockval]" (lockval)
			: "memory");

	return lockval == 0;
}
#endif

extern uint8_t rte_rtm_supported;

static inline int rte_tm_supported(void)
{
	return rte_rtm_supported;
}

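/*
 * Illustrative sketch: rte_tm_supported() can gate a caller's choice of
 * the elided variants, although the *_tm functions below already fall
 * back by themselves when RTM is absent, so the check is optional:
 *
 *	if (rte_tm_supported())
 *		rte_spinlock_lock_tm(&sl);
 *	else
 *		rte_spinlock_lock(&sl);
 */
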
static inline int
rte_try_tm(volatile int *lock)
{
	int i, retries;

	if (!rte_rtm_supported)
		return 0;

	retries = RTE_RTM_MAX_RETRIES;

	while (likely(retries--)) {

		unsigned int status = rte_xbegin();

		if (likely(RTE_XBEGIN_STARTED == status)) {
			if (unlikely(*lock))
				/* lock already held: abort the transaction */
				rte_xabort(RTE_XABORT_LOCK_BUSY);
			else
				return 1;
		}
		while (*lock)
			rte_pause();

		if ((status & RTE_XABORT_CONFLICT) ||
		   ((status & RTE_XABORT_EXPLICIT) &&
			(RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))) {
			/* add a small delay before retrying, basing the
			 * delay on the number of times we've already tried,
			 * to give a back-off type of behaviour. We
			 * randomize the pause count by taking bits from
			 * the tsc count
			 */
			int try_count = RTE_RTM_MAX_RETRIES - retries;
			int pause_count = (rte_rdtsc() & 0x7) | 1;
			pause_count <<= try_count;
			for (i = 0; i < pause_count; i++)
				rte_pause();
			continue;
		}

		if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
			break;
	}
	return 0;
}

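/*
 * Illustrative sketch (assumption: callers normally use the *_tm wrappers
 * below instead of calling rte_try_tm() directly). A successful
 * rte_try_tm() leaves the caller inside an RTM transaction, so the
 * matching release on that path must be rte_xend(), not a store to the
 * lock word:
 *
 *	if (rte_try_tm(&sl->locked)) {
 *		... speculative critical section ...
 *		rte_xend();
 *	} else {
 *		rte_spinlock_lock(sl);
 *		... critical section ...
 *		rte_spinlock_unlock(sl);
 *	}
 */
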
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return;

	rte_spinlock_lock(sl); /* fall-back */
}

static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
{
	if (likely(rte_try_tm(&sl->locked)))
		return 1;

	return rte_spinlock_trylock(sl);
}

static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
{
	if (unlikely(sl->locked))
		rte_spinlock_unlock(sl);
	else
		/* the lock was elided: commit the transaction instead */
		rte_xend();
}

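/*
 * Usage sketch (illustrative; `sl` is a hypothetical initialized lock).
 * On RTM-capable CPUs the fast path never writes the lock word, so
 * non-conflicting critical sections can run concurrently; on other CPUs
 * this behaves exactly like the plain lock/unlock pair:
 *
 *	rte_spinlock_lock_tm(sl);
 *	... critical section ...
 *	rte_spinlock_unlock_tm(sl);
 */
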
static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return;

	rte_spinlock_recursive_lock(slr); /* fall-back */
}

static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
{
	if (unlikely(slr->sl.locked))
		rte_spinlock_recursive_unlock(slr);
	else
		rte_xend();
}

static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return 1;

	return rte_spinlock_recursive_trylock(slr);
}

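/*
 * Usage sketch (illustrative; `slr` is a hypothetical recursive lock,
 * initialized with RTE_SPINLOCK_RECURSIVE_INITIALIZER from
 * generic/rte_spinlock.h). The recursive variants follow the same
 * elide-then-fall-back pattern and allow re-entry by the owning lcore:
 *
 *	static rte_spinlock_recursive_t slr = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
 *
 *	rte_spinlock_recursive_lock_tm(&slr);
 *	rte_spinlock_recursive_lock_tm(&slr);	(nested acquire, same lcore)
 *	rte_spinlock_recursive_unlock_tm(&slr);
 *	rte_spinlock_recursive_unlock_tm(&slr);
 */
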
#ifdef __cplusplus
}
#endif

#endif /* _RTE_SPINLOCK_X86_64_H_ */