eal: add wrapper for C11 atomic thread fence
[dpdk.git] lib/librte_eal/ppc/include/rte_atomic.h
/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
 * Copyright (c) 2008 Marcel Moolenaar
 * Copyright (c) 2001 Benno Rice
 * Copyright (c) 2001 David E. O'Brien
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 */

#ifndef _RTE_ATOMIC_PPC_64_H_
#define _RTE_ATOMIC_PPC_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include "generic/rte_atomic.h"

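/*
 * All of the barrier macros below expand to the heavyweight "sync"
 * instruction on POWER; the lighter "lwsync"/"eieio" forms are not
 * used at this level.
 */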
#define rte_mb()  asm volatile("sync" : : : "memory")

#define rte_wmb() asm volatile("sync" : : : "memory")

#define rte_rmb() asm volatile("sync" : : : "memory")

#define rte_smp_mb() rte_mb()

#define rte_smp_wmb() rte_wmb()

#define rte_smp_rmb() rte_rmb()

#define rte_io_mb() rte_mb()

#define rte_io_wmb() rte_wmb()

#define rte_io_rmb() rte_rmb()

#define rte_cio_wmb() rte_wmb()

#define rte_cio_rmb() rte_rmb()

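/**
 * Synchronization fence between threads based on the specified memory order.
 *
 * Thin wrapper over the compiler's __atomic_thread_fence() built-in;
 * callers pass a C11/GCC memory-order constant, e.g.
 * rte_atomic_thread_fence(__ATOMIC_RELEASE).
 */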
static __rte_always_inline void
rte_atomic_thread_fence(int memory_order)
{
	__atomic_thread_fence(memory_order);
}

/*------------------------- 16 bit atomic operations -------------------------*/
/* To be compatible with Power7, use GCC built-in functions for 16-bit
 * operations.
 */

#ifndef RTE_FORCE_INTRINSICS
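/* Returns non-zero if *dst was updated from exp to src, 0 otherwise;
 * both the success and failure orderings below are __ATOMIC_ACQUIRE.
 */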
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
	return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
		__ATOMIC_ACQUIRE) ? 1 : 0;
}

static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
	return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
	return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
	return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
{
	return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
}
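
/*
 * Illustrative use of the 16-bit helpers above (sketch only;
 * release_resource() is a hypothetical callback, not part of DPDK):
 *
 *	rte_atomic16_t refs;
 *
 *	rte_atomic16_init(&refs);
 *	rte_atomic16_inc(&refs);
 *	if (rte_atomic16_dec_and_test(&refs))
 *		release_resource();
 */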

/*------------------------- 32 bit atomic operations -------------------------*/

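/*
 * Compare-and-set built on lwarx/stwcx.: the leading lwsync orders prior
 * accesses before the reservation, the trailing isync keeps later accesses
 * from starting until the outcome is known, and on a compare mismatch the
 * stwcx. of the just-loaded value serves only to clear the reservation
 * (the same approach as the FreeBSD code this file derives from).
 */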
static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
	unsigned int ret = 0;

	asm volatile(
			"\tlwsync\n"
			"1:\tlwarx %[ret], 0, %[dst]\n"
			"cmplw %[exp], %[ret]\n"
			"bne 2f\n"
			"stwcx. %[src], 0, %[dst]\n"
			"bne- 1b\n"
			"li %[ret], 1\n"
			"b 3f\n"
			"2:\n"
			"stwcx. %[ret], 0, %[dst]\n"
			"li %[ret], 0\n"
			"3:\n"
			"isync\n"
			: [ret] "=&r" (ret), "=m" (*dst)
			: [dst] "r" (dst),
			  [exp] "r" (exp),
			  [src] "r" (src),
			  "m" (*dst)
			: "cc", "memory");

	return ret;
}

static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
	return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}

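/* addic sets the carry bit (XER[CA]), hence the "xer" clobber in the
 * increment/decrement loops below.
 */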
static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
	int t;

	asm volatile(
			"1: lwarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],1\n"
			"stwcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
	int t;

	asm volatile(
			"1: lwarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],-1\n"
			"stwcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
	int ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: lwarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],1\n"
			"stwcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
	int ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: lwarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],-1\n"
			"stwcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}
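
/* The plain inc/dec loops above carry no barriers; the *_and_test
 * variants are bracketed by lwsync/isync and so also act as ordering
 * points.
 */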

static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
{
	return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
}

/*------------------------- 64 bit atomic operations -------------------------*/

static inline int
rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
	unsigned int ret = 0;

	asm volatile(
			"\tlwsync\n"
			"1: ldarx %[ret], 0, %[dst]\n"
			"cmpld %[exp], %[ret]\n"
			"bne 2f\n"
			"stdcx. %[src], 0, %[dst]\n"
			"bne- 1b\n"
			"li %[ret], 1\n"
			"b 3f\n"
			"2:\n"
			"stdcx. %[ret], 0, %[dst]\n"
			"li %[ret], 0\n"
			"3:\n"
			"isync\n"
			: [ret] "=&r" (ret), "=m" (*dst)
			: [dst] "r" (dst),
			  [exp] "r" (exp),
			  [src] "r" (src),
			  "m" (*dst)
			: "cc", "memory");
	return ret;
}

static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
	v->cnt = 0;
}

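/* The %U and %X operand modifiers let GCC pick the update or indexed
 * form of ld/std when the addressing mode requires it, keeping these
 * plain 64-bit loads/stores single (and therefore atomic) instructions.
 */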
static inline int64_t
rte_atomic64_read(rte_atomic64_t *v)
{
	long ret;

	asm volatile("ld%U1%X1 %[ret],%[cnt]"
		: [ret] "=r"(ret)
		: [cnt] "m"(v->cnt));

	return ret;
}

static inline void
rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
{
	asm volatile("std%U0%X0 %[new_value],%[cnt]"
		: [cnt] "=m"(v->cnt)
		: [new_value] "r"(new_value));
}

static inline void
rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"add %[t],%[inc],%[t]\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
			: "cc", "memory");
}

static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"subf %[t],%[dec],%[t]\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
			: "cc", "memory");
}

static inline void
rte_atomic64_inc(rte_atomic64_t *v)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],1\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

static inline void
rte_atomic64_dec(rte_atomic64_t *v)
{
	long t;

	asm volatile(
			"1: ldarx %[t],0,%[cnt]\n"
			"addic %[t],%[t],-1\n"
			"stdcx. %[t],0,%[cnt]\n"
			"bne- 1b\n"
			: [t] "=&r" (t), "=m" (v->cnt)
			: [cnt] "r" (&v->cnt), "m" (v->cnt)
			: "cc", "xer", "memory");
}

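/* Unlike the void add/sub/inc/dec above, the value-returning forms
 * below are bracketed by lwsync/isync, mirroring the 32-bit helpers.
 */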
static inline int64_t
rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"add %[ret],%[inc],%[ret]\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [inc] "r" (inc), [cnt] "r" (&v->cnt)
			: "cc", "memory");

	return ret;
}

static inline int64_t
rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"subf %[ret],%[dec],%[ret]\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [dec] "r" (dec), [cnt] "r" (&v->cnt)
			: "cc", "memory");

	return ret;
}

static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],1\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
	long ret;

	asm volatile(
			"\n\tlwsync\n"
			"1: ldarx %[ret],0,%[cnt]\n"
			"addic %[ret],%[ret],-1\n"
			"stdcx. %[ret],0,%[cnt]\n"
			"bne- 1b\n"
			"isync\n"
			: [ret] "=&r" (ret)
			: [cnt] "r" (&v->cnt)
			: "cc", "xer", "memory");

	return ret == 0;
}

static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}

/**
 * Atomically set a 64-bit counter to 0.
 *
 * @param v
 *   A pointer to the atomic counter.
 */
static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
	v->cnt = 0;
}

static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
}

#endif
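
/*
 * Illustrative cmpset usage (sketch only; "owner", "my_id",
 * take_ownership() and retry_later() are hypothetical names, not part
 * of this header). The same code works whether the implementation
 * above or the RTE_FORCE_INTRINSICS generic one is in effect:
 *
 *	static volatile uint64_t owner;
 *
 *	if (rte_atomic64_cmpset(&owner, 0, my_id))
 *		take_ownership();
 *	else
 *		retry_later();
 */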

#ifdef __cplusplus
}
#endif

#endif /* _RTE_ATOMIC_PPC_64_H_ */