/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Inspired from FreeBSD src/sys/amd64/include/atomic.h
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 */

#ifndef _RTE_ATOMIC_X86_64_H_
#define _RTE_ATOMIC_X86_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <emmintrin.h>
#include "generic/rte_atomic.h"

#if RTE_MAX_LCORE == 1
#define MPLOCKED                        /**< No need to insert MP lock prefix. */
#else
#define MPLOCKED        "lock ; "       /**< Insert MP lock prefix. */
#endif

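/*
 * MPLOCKED expands to a string literal, so it is simply pasted onto the
 * front of the instruction string in each asm block below; an illustrative
 * expansion (not additional API):
 *
 *     asm volatile(MPLOCKED "incw %[cnt]" ...)
 *
 * emits "lock ; incw %[cnt]" on multi-core builds and a plain
 * "incw %[cnt]" when RTE_MAX_LCORE == 1.
 */
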
#define rte_mb() _mm_mfence()

#define rte_wmb() _mm_sfence()

#define rte_rmb() _mm_lfence()

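/*
 * A minimal sketch of how the fences pair up in a producer/consumer
 * exchange; `data` and `ready` are hypothetical shared variables, not
 * part of this header:
 *
 *     data = compute();      // producer: write the payload ...
 *     rte_wmb();             // ... order it before publishing the flag
 *     ready = 1;
 *
 *     while (ready == 0)     // consumer: poll the flag ...
 *         ;
 *     rte_rmb();             // ... order the flag read before the payload read
 *     use(data);
 */
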
/*------------------------- 16 bit atomic operations -------------------------*/

#ifndef RTE_FORCE_INTRINSICS
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
        uint8_t res;

        asm volatile(
                        MPLOCKED
                        "cmpxchgw %[src], %[dst];"
                        "sete %[res];"
                        : [res] "=a" (res),     /* output */
                          [dst] "=m" (*dst)
                        : [src] "r" (src),      /* input */
                          "a" (exp),
                          "m" (*dst)
                        : "memory");            /* clobber list */
        return res;
}

static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
        return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}

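/*
 * Typical compare-and-set retry loop (a sketch; `val` is a hypothetical
 * volatile uint16_t shared counter, not part of this header); cmpset
 * returns non-zero only if *dst still held `exp` and was swapped:
 *
 *     uint16_t old;
 *     do {
 *         old = val;
 *     } while (rte_atomic16_cmpset(&val, old, old + 1) == 0);
 */
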
static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
        asm volatile(
                        MPLOCKED
                        "incw %[cnt]"
                        : [cnt] "=m" (v->cnt)   /* output */
                        : "m" (v->cnt)          /* input */
                        );
}

static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
        asm volatile(
                        MPLOCKED
                        "decw %[cnt]"
                        : [cnt] "=m" (v->cnt)   /* output */
                        : "m" (v->cnt)          /* input */
                        );
}

static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
        uint8_t ret;

        asm volatile(
                        MPLOCKED
                        "incw %[cnt] ; "
                        "sete %[ret]"
                        : [cnt] "+m" (v->cnt),  /* output */
                          [ret] "=qm" (ret)
                        );
        return ret != 0;
}

static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
        uint8_t ret;

        asm volatile(MPLOCKED
                        "decw %[cnt] ; "
                        "sete %[ret]"
                        : [cnt] "+m" (v->cnt),  /* output */
                          [ret] "=qm" (ret)
                        );
        return ret != 0;
}

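/*
 * The *_and_test variants fold the zero check into the locked
 * instruction itself; a reference-count release sketch (hypothetical
 * `refcnt` field and free_obj() helper, not part of this header):
 *
 *     if (rte_atomic16_dec_and_test(&obj->refcnt))
 *         free_obj(obj);      // we dropped the last reference
 */
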
/*------------------------- 32 bit atomic operations -------------------------*/

static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
        uint8_t res;

        asm volatile(
                        MPLOCKED
                        "cmpxchgl %[src], %[dst];"
                        "sete %[res];"
                        : [res] "=a" (res),     /* output */
                          [dst] "=m" (*dst)
                        : [src] "r" (src),      /* input */
                          "a" (exp),
                          "m" (*dst)
                        : "memory");            /* clobber list */
        return res;
}

static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
        return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
        asm volatile(
                        MPLOCKED
                        "incl %[cnt]"
                        : [cnt] "=m" (v->cnt)   /* output */
                        : "m" (v->cnt)          /* input */
                        );
}

static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
        asm volatile(
                        MPLOCKED
                        "decl %[cnt]"
                        : [cnt] "=m" (v->cnt)   /* output */
                        : "m" (v->cnt)          /* input */
                        );
}

static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
        uint8_t ret;

        asm volatile(
                        MPLOCKED
                        "incl %[cnt] ; "
                        "sete %[ret]"
                        : [cnt] "+m" (v->cnt),  /* output */
                          [ret] "=qm" (ret)
                        );
        return ret != 0;
}

static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
        uint8_t ret;

        asm volatile(MPLOCKED
                        "decl %[cnt] ; "
                        "sete %[ret]"
                        : [cnt] "+m" (v->cnt),  /* output */
                          [ret] "=qm" (ret)
                        );
        return ret != 0;
}

/*------------------------- 64 bit atomic operations -------------------------*/

static inline int
rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
        uint8_t res;

        asm volatile(
                        MPLOCKED
                        "cmpxchgq %[src], %[dst];"
                        "sete %[res];"
                        : [res] "=a" (res),     /* output */
                          [dst] "=m" (*dst)
                        : [src] "r" (src),      /* input */
                          "a" (exp),
                          "m" (*dst)
                        : "memory");            /* clobber list */
        return res;
}

static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
        v->cnt = 0;
}

/* On x86-64, aligned 64-bit loads and stores are atomic, so plain
 * accesses suffice for read and set. */
static inline int64_t
rte_atomic64_read(rte_atomic64_t *v)
{
        return v->cnt;
}

static inline void
rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
{
        v->cnt = new_value;
}

static inline void
rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
{
        asm volatile(
                        MPLOCKED
                        "addq %[inc], %[cnt]"
                        : [cnt] "=m" (v->cnt)   /* output */
                        : [inc] "ir" (inc),     /* input */
                          "m" (v->cnt)
                        );
}

static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
{
        asm volatile(
                        MPLOCKED
                        "subq %[dec], %[cnt]"
                        : [cnt] "=m" (v->cnt)   /* output */
                        : [dec] "ir" (dec),     /* input */
                          "m" (v->cnt)
                        );
}

static inline void
rte_atomic64_inc(rte_atomic64_t *v)
{
        asm volatile(
                        MPLOCKED
                        "incq %[cnt]"
                        : [cnt] "=m" (v->cnt)   /* output */
                        : "m" (v->cnt)          /* input */
                        );
}

static inline void
rte_atomic64_dec(rte_atomic64_t *v)
{
        asm volatile(
                        MPLOCKED
                        "decq %[cnt]"
                        : [cnt] "=m" (v->cnt)   /* output */
                        : "m" (v->cnt)          /* input */
                        );
}

static inline int64_t
rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
{
        int64_t prev = inc;

        asm volatile(
                        MPLOCKED
                        "xaddq %[prev], %[cnt]"
                        : [prev] "+r" (prev),   /* output */
                          [cnt] "=m" (v->cnt)
                        : "m" (v->cnt)          /* input */
                        );
        return prev + inc;
}

static inline int64_t
rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
{
        return rte_atomic64_add_return(v, -dec);
}

static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
{
        uint8_t ret;

        asm volatile(
                        MPLOCKED
                        "incq %[cnt] ; "
                        "sete %[ret]"
                        : [cnt] "+m" (v->cnt),  /* output */
                          [ret] "=qm" (ret)
                        );
        return ret != 0;
}

static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
        uint8_t ret;

        asm volatile(
                        MPLOCKED
                        "decq %[cnt] ; "
                        "sete %[ret]"
                        : [cnt] "+m" (v->cnt),  /* output */
                          [ret] "=qm" (ret)
                        );
        return ret != 0;
}

static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
        return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}

static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
        v->cnt = 0;
}

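/*
 * test_and_set and clear pair into a minimal spinlock sketch
 * (hypothetical `lock` of type rte_atomic64_t, not part of this header);
 * test_and_set returns non-zero only when it flips the value 0 -> 1:
 *
 *     while (!rte_atomic64_test_and_set(&lock))
 *         ;                            // busy-wait until we own the lock
 *     ...critical section...
 *     rte_atomic64_clear(&lock);       // release
 */
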
#endif /* RTE_FORCE_INTRINSICS */

#ifdef __cplusplus
}
#endif

#endif /* _RTE_ATOMIC_X86_64_H_ */