/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_ATOMIC_X86_H_
#define _RTE_ATOMIC_X86_H_

#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <rte_common.h>
#include <rte_config.h>
#include <emmintrin.h>
#include "generic/rte_atomic.h"
#if RTE_MAX_LCORE == 1
#define MPLOCKED                        /**< No need to insert MP lock prefix. */
#else
#define MPLOCKED        "lock ; "       /**< Insert MP lock prefix. */
#endif
#define rte_mb() _mm_mfence()

#define rte_wmb() _mm_sfence()

#define rte_rmb() _mm_lfence()

#define rte_smp_wmb() rte_compiler_barrier()

#define rte_smp_rmb() rte_compiler_barrier()
/*
 * From the Intel Software Development Manual; Vol 3;
 * 8.2.2 Memory Ordering in P6 and More Recent Processor Families:
 *
 * . Reads are not reordered with other reads.
 * . Writes are not reordered with older reads.
 * . Writes to memory are not reordered with other writes,
 *   with the following exceptions:
 *   . streaming stores (writes) executed with the non-temporal move
 *     instructions (MOVNTI, MOVNTQ, MOVNTDQ, MOVNTPS, and MOVNTPD); and
 *   . string operations (see Section 8.2.4.1).
 * . Reads may be reordered with older writes to different locations but not
 *   with older writes to the same location.
 * . Reads or writes cannot be reordered with I/O instructions,
 *   locked instructions, or serializing instructions.
 * . Reads cannot pass earlier LFENCE and MFENCE instructions.
 * . Writes ... cannot pass earlier LFENCE, SFENCE, and MFENCE instructions.
 * . LFENCE instructions cannot pass earlier reads.
 * . SFENCE instructions cannot pass earlier writes ...
 * . MFENCE instructions cannot pass earlier reads, writes ...
 *
 * As pointed out by the JVM folks, this makes it possible to use
 * lock-prefixed instructions to get the same effect as mfence, and on most
 * modern hardware that gives better performance than mfence:
 * https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
 * The basic idea is to use a lock-prefixed add with a dummy memory location
 * as the destination. From their experiments, 128B (2 cache lines) below
 * the current stack pointer looks like a good candidate.
 * So below we use that technique for the rte_smp_mb() implementation.
 */
static __rte_always_inline void
rte_smp_mb(void)
{
#ifdef RTE_ARCH_I686
	asm volatile("lock addl $0, -128(%%esp); " ::: "memory");
#else
	asm volatile("lock addl $0, -128(%%rsp); " ::: "memory");
#endif
}
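
/*
 * Usage sketch (illustrative only; dekker_try_enter and its flags are
 * hypothetical, not part of this header): the store-load case that needs a
 * full barrier. Each thread raises its own flag, then checks the peer's.
 * Without rte_smp_mb() the load could be reordered before the store and
 * both threads could observe 0 and enter the critical section together.
 */
static inline int
dekker_try_enter(volatile int *mine, volatile int *theirs)
{
	*mine = 1;
	rte_smp_mb();   /* order our flag store before the peer-flag load */
	return *theirs == 0;
}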
#define rte_io_mb() rte_mb()

#define rte_io_wmb() rte_compiler_barrier()

#define rte_io_rmb() rte_compiler_barrier()
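
/*
 * Usage sketch (illustrative only; post_descriptor, desc and doorbell are
 * hypothetical): a driver fills a descriptor in memory, then writes a
 * doorbell register so the device reads it. rte_io_wmb() keeps the
 * descriptor store ahead of the doorbell store; per the ordering rules
 * quoted above, a compiler barrier is enough for that on x86, hence the
 * mapping.
 */
static inline void
post_descriptor(volatile uint32_t *desc, uint32_t value,
		volatile uint32_t *doorbell, uint32_t tail)
{
	*desc = value;          /* fill the descriptor */
	rte_io_wmb();           /* descriptor visible before the doorbell */
	*doorbell = tail;       /* ring the doorbell */
}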
/**
 * Synchronization fence between threads based on the specified memory order.
 *
 * On x86 the __atomic_thread_fence(__ATOMIC_SEQ_CST) generates a full
 * 'mfence', which is quite expensive. The optimized implementation of
 * rte_smp_mb is used instead.
 */
static __rte_always_inline void
rte_atomic_thread_fence(int memory_order)
{
	if (memory_order == __ATOMIC_SEQ_CST)
		rte_smp_mb();
	else
		__atomic_thread_fence(memory_order);
}
/*------------------------- 16 bit atomic operations -------------------------*/

#ifndef RTE_FORCE_INTRINSICS
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
	uint8_t res;

	asm volatile(MPLOCKED
			"cmpxchgw %[src], %[dst];"
			"sete %[res];"
			: [res] "=a" (res), [dst] "=m" (*dst)    /* output */
			: [src] "r" (src), "a" (exp), "m" (*dst) /* input */
			: "memory");                             /* clobber list */
	return res;
}
static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
{
	asm volatile(MPLOCKED
			"xchgw %0, %1;"
			: "=r" (val), "=m" (*dst)
			: "0" (val), "m" (*dst)
			: "memory");            /* clobber list */
	return val;
}
static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
	return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}
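
/*
 * Usage sketch (illustrative only; spin_acquire16 is hypothetical):
 * test-and-set as a tiny spinlock. rte_atomic16_test_and_set() returns
 * non-zero only for the thread that flipped the flag from 0 to 1; the
 * owner releases by resetting the counter to 0.
 */
static inline void
spin_acquire16(rte_atomic16_t *flag)
{
	while (!rte_atomic16_test_and_set(flag))
		_mm_pause();    /* CPU spin-wait hint, from <emmintrin.h> */
}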
static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
	asm volatile(MPLOCKED "incw %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt));        /* input */
}
static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
	asm volatile(MPLOCKED "decw %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt));        /* input */
}
static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
	uint8_t ret;

	asm volatile(MPLOCKED "incw %[cnt]; sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret));
	return ret != 0;
}
static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
	uint8_t ret;

	asm volatile(MPLOCKED "decw %[cnt]; sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret));
	return ret != 0;
}
/*------------------------- 32 bit atomic operations -------------------------*/
static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
	uint8_t res;

	asm volatile(MPLOCKED
			"cmpxchgl %[src], %[dst];"
			"sete %[res];"
			: [res] "=a" (res), [dst] "=m" (*dst)    /* output */
			: [src] "r" (src), "a" (exp), "m" (*dst) /* input */
			: "memory");                             /* clobber list */
	return res;
}
static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
{
	asm volatile(MPLOCKED
			"xchgl %0, %1;"
			: "=r" (val), "=m" (*dst)
			: "0" (val), "m" (*dst)
			: "memory");            /* clobber list */
	return val;
}
static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
	return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}
static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
	asm volatile(MPLOCKED "incl %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt));        /* input */
}
static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
	asm volatile(MPLOCKED "decl %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt));        /* input */
}
static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
	uint8_t ret;

	asm volatile(MPLOCKED "incl %[cnt]; sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret));
	return ret != 0;
}
static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
	uint8_t ret;

	asm volatile(MPLOCKED "decl %[cnt]; sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret));
	return ret != 0;
}
#endif /* RTE_FORCE_INTRINSICS */
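
/*
 * Usage sketch (illustrative only; obj_put and free_fn are hypothetical):
 * the classic reference-count release. rte_atomic32_dec_and_test() returns
 * true for exactly one thread, the one that drops the count to zero, so
 * the object is freed exactly once.
 */
static inline void
obj_put(rte_atomic32_t *refcnt, void *obj, void (*free_fn)(void *))
{
	if (rte_atomic32_dec_and_test(refcnt))
		free_fn(obj);   /* last reference gone */
}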
#ifdef RTE_ARCH_I686
#include "rte_atomic_32.h"
#else
#include "rte_atomic_64.h"
#endif
#ifdef __cplusplus
}
#endif

#endif /* _RTE_ATOMIC_X86_H_ */