/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _RTE_ATOMIC_X86_H_
#define _RTE_ATOMIC_X86_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include <rte_common.h>
#include <emmintrin.h>
#include "generic/rte_atomic.h"
#if RTE_MAX_LCORE == 1
#define MPLOCKED                        /**< No need to insert MP lock prefix. */
#else
#define MPLOCKED        "lock ; "       /**< Insert MP lock prefix. */
#endif
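/*
 * The "lock" prefix above makes the read-modify-write instruction that
 * follows it atomic with respect to other cores; when at most one lcore
 * can run (RTE_MAX_LCORE == 1) there is nothing to contend with, so the
 * prefix can be left out.
 */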
#define rte_mb() _mm_mfence()

#define rte_wmb() _mm_sfence()

#define rte_rmb() _mm_lfence()

#define rte_smp_mb() rte_mb()

#define rte_smp_wmb() rte_compiler_barrier()

#define rte_smp_rmb() rte_compiler_barrier()
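/*
 * x86 is strongly ordered (TSO): stores are not reordered with other
 * stores, and loads are not reordered with other loads, so the SMP
 * write/read barriers only need to stop the compiler from reordering,
 * not the CPU. Only rte_smp_mb() needs a real fence, because a store
 * may otherwise be reordered with a later load.
 */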
/*------------------------- 16 bit atomic operations -------------------------*/
#ifndef RTE_FORCE_INTRINSICS
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
	uint8_t res;

	/*
	 * cmpxchgw compares %ax (exp) with *dst; on a match it stores src
	 * into *dst. sete then captures ZF: 1 on success, 0 otherwise.
	 */
	asm volatile(
			MPLOCKED
			"cmpxchgw %[src], %[dst];"
			"sete %[res];"
			: [res] "=a" (res),     /* output */
			  [dst] "=m" (*dst)
			: [src] "r" (src),      /* input */
			  "a" (exp),
			  "m" (*dst)
			: "memory");            /* no-clobber list */
	return res;
}
static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
	return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}
static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
	asm volatile(
			MPLOCKED
			"incw %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
	asm volatile(
			MPLOCKED
			"decw %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"incw %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}
static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
	uint8_t ret;

	asm volatile(MPLOCKED
			"decw %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}
/*------------------------- 32 bit atomic operations -------------------------*/
static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
	uint8_t res;

	asm volatile(
			MPLOCKED
			"cmpxchgl %[src], %[dst];"
			"sete %[res];"
			: [res] "=a" (res),     /* output */
			  [dst] "=m" (*dst)
			: [src] "r" (src),      /* input */
			  "a" (exp),
			  "m" (*dst)
			: "memory");            /* no-clobber list */
	return res;
}
static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
	return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}
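/*
 * Example (illustrative sketch): test_and_set as a one-shot flag, so
 * that exactly one caller performs some initialization. The variable
 * name "once" is hypothetical.
 *
 *	static rte_atomic32_t once = RTE_ATOMIC32_INIT(0);
 *
 *	if (rte_atomic32_test_and_set(&once)) {
 *		... do one-time setup; every later caller gets 0 ...
 *	}
 */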
static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
	asm volatile(
			MPLOCKED
			"incl %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
	asm volatile(
			MPLOCKED
			"decl %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"incl %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}
static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
	uint8_t ret;

	asm volatile(MPLOCKED
			"decl %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}
#endif
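/*
 * Example (illustrative sketch): dec_and_test is the natural fit for
 * reference counting, where the last holder releases the object.
 * "obj", its refcnt field and obj_free() are hypothetical.
 *
 *	if (rte_atomic32_dec_and_test(&obj->refcnt))
 *		obj_free(obj);
 */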
#ifdef RTE_ARCH_I686
#include "rte_atomic_32.h"
#else
#include "rte_atomic_64.h"
#endif
#ifdef __cplusplus
}
#endif

#endif /* _RTE_ATOMIC_X86_H_ */