eal: remove deprecated coherent IO memory barriers
[dpdk.git] / lib / librte_eal / ppc / include / rte_atomic.h
/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
 * Copyright (c) 2008 Marcel Moolenaar
 * Copyright (c) 2001 Benno Rice
 * Copyright (c) 2001 David E. O'Brien
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 */

#ifndef _RTE_ATOMIC_PPC_64_H_
#define _RTE_ATOMIC_PPC_64_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include "generic/rte_atomic.h"

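/*
 * "sync" (heavyweight sync) orders all earlier loads and stores against
 * all later ones, including accesses to device memory, which is why the
 * same instruction backs the general, SMP and I/O barrier flavours below.
 */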
#define rte_mb()  asm volatile("sync" : : : "memory")

#define rte_wmb() asm volatile("sync" : : : "memory")

#define rte_rmb() asm volatile("sync" : : : "memory")

#define rte_smp_mb() rte_mb()

#define rte_smp_wmb() rte_wmb()

#define rte_smp_rmb() rte_rmb()

#define rte_io_mb() rte_mb()

#define rte_io_wmb() rte_wmb()

#define rte_io_rmb() rte_rmb()

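/* Issue a thread fence for the given __ATOMIC_* memory order. */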
static __rte_always_inline void
rte_atomic_thread_fence(int memory_order)
{
        __atomic_thread_fence(memory_order);
}

/*------------------------- 16 bit atomic operations -------------------------*/
/* To be compatible with Power7, use GCC built-in functions for the
 * 16-bit operations.
 */

#ifndef RTE_FORCE_INTRINSICS
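
/*
 * Strong compare-and-set via the GCC built-in, with acquire ordering on
 * both the success and failure paths; returns 1 on success, 0 otherwise.
 */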
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
        return __atomic_compare_exchange(dst, &exp, &src, 0, __ATOMIC_ACQUIRE,
                __ATOMIC_ACQUIRE) ? 1 : 0;
}

static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
        return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
        __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
        __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
}

static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
        return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
        return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}

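/* __atomic_exchange_2 is the GCC sized built-in for 2-byte objects;
 * __ATOMIC_SEQ_CST makes the swap fully ordered.
 */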
static inline uint16_t
rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
{
        return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
}

/*------------------------- 32 bit atomic operations -------------------------*/

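/*
 * Compare-and-set built from a lwarx/stwcx. load-reserve loop.  The
 * leading lwsync and trailing isync approximate full-barrier semantics.
 * On a compare miss (label 2), the just-loaded value is stored back
 * solely to cancel the outstanding reservation, as in the FreeBSD
 * original.
 */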
static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
        unsigned int ret = 0;

        asm volatile(
                        "\tlwsync\n"
                        "1:\tlwarx %[ret], 0, %[dst]\n"
                        "cmplw %[exp], %[ret]\n"
                        "bne 2f\n"
                        "stwcx. %[src], 0, %[dst]\n"
                        "bne- 1b\n"
                        "li %[ret], 1\n"
                        "b 3f\n"
                        "2:\n"
                        "stwcx. %[ret], 0, %[dst]\n"
                        "li %[ret], 0\n"
                        "3:\n"
                        "isync\n"
                        : [ret] "=&r" (ret), "=m" (*dst)
                        : [dst] "r" (dst),
                          [exp] "r" (exp),
                          [src] "r" (src),
                          "m" (*dst)
                        : "cc", "memory");

        return ret;
}

static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
        return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}

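/*
 * The plain inc/dec helpers are unfenced lwarx/stwcx. retry loops.
 * addic updates the XER carry bit, hence the "xer" clobber.
 */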
static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
        int t;

        asm volatile(
                        "1: lwarx %[t],0,%[cnt]\n"
                        "addic %[t],%[t],1\n"
                        "stwcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "=m" (v->cnt)
                        : [cnt] "r" (&v->cnt), "m" (v->cnt)
                        : "cc", "xer", "memory");
}

static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
        int t;

        asm volatile(
                        "1: lwarx %[t],0,%[cnt]\n"
                        "addic %[t],%[t],-1\n"
                        "stwcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "=m" (v->cnt)
                        : [cnt] "r" (&v->cnt), "m" (v->cnt)
                        : "cc", "xer", "memory");
}

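/*
 * The *_and_test variants are fenced: lwsync orders earlier accesses
 * before the update, and isync keeps later code from running ahead of
 * the completed update.
 */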
static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
        int ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: lwarx %[ret],0,%[cnt]\n"
                        "addic %[ret],%[ret],1\n"
                        "stwcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");

        return ret == 0;
}

static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
        int ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: lwarx %[ret],0,%[cnt]\n"
                        "addic %[ret],%[ret],-1\n"
                        "stwcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");

        return ret == 0;
}

static inline uint32_t
rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
{
        return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
}

/*------------------------- 64 bit atomic operations -------------------------*/

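/* 64-bit analogue of rte_atomic32_cmpset, built on ldarx/stdcx. */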
static inline int
rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
        unsigned int ret = 0;

        asm volatile(
                        "\tlwsync\n"
                        "1: ldarx %[ret], 0, %[dst]\n"
                        "cmpld %[exp], %[ret]\n"
                        "bne 2f\n"
                        "stdcx. %[src], 0, %[dst]\n"
                        "bne- 1b\n"
                        "li %[ret], 1\n"
                        "b 3f\n"
                        "2:\n"
                        "stdcx. %[ret], 0, %[dst]\n"
                        "li %[ret], 0\n"
                        "3:\n"
                        "isync\n"
                        : [ret] "=&r" (ret), "=m" (*dst)
                        : [dst] "r" (dst),
                          [exp] "r" (exp),
                          [src] "r" (src),
                          "m" (*dst)
                        : "cc", "memory");
        return ret;
}

static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
        v->cnt = 0;
}

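/*
 * Naturally aligned 64-bit loads and stores are single-copy atomic on
 * powerpc64, so read/set need no reservation; the %U/%X operand
 * modifiers let GCC emit the update/indexed form of the instruction
 * when appropriate.
 */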
static inline int64_t
rte_atomic64_read(rte_atomic64_t *v)
{
        long ret;

        asm volatile("ld%U1%X1 %[ret],%[cnt]"
                : [ret] "=r"(ret)
                : [cnt] "m"(v->cnt));

        return ret;
}

static inline void
rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
{
        asm volatile("std%U0%X0 %[new_value],%[cnt]"
                : [cnt] "=m"(v->cnt)
                : [new_value] "r"(new_value));
}

static inline void
rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
{
        long t;

        asm volatile(
                        "1: ldarx %[t],0,%[cnt]\n"
                        "add %[t],%[inc],%[t]\n"
                        "stdcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "=m" (v->cnt)
                        : [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
                        : "cc", "memory");
}

static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
{
        long t;

        asm volatile(
                        "1: ldarx %[t],0,%[cnt]\n"
                        "subf %[t],%[dec],%[t]\n"
                        "stdcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "=m" (v->cnt)
                        : [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
                        : "cc", "memory");
}

static inline void
rte_atomic64_inc(rte_atomic64_t *v)
{
        long t;

        asm volatile(
                        "1: ldarx %[t],0,%[cnt]\n"
                        "addic %[t],%[t],1\n"
                        "stdcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "=m" (v->cnt)
                        : [cnt] "r" (&v->cnt), "m" (v->cnt)
                        : "cc", "xer", "memory");
}

static inline void
rte_atomic64_dec(rte_atomic64_t *v)
{
        long t;

        asm volatile(
                        "1: ldarx %[t],0,%[cnt]\n"
                        "addic %[t],%[t],-1\n"
                        "stdcx. %[t],0,%[cnt]\n"
                        "bne- 1b\n"
                        : [t] "=&r" (t), "=m" (v->cnt)
                        : [cnt] "r" (&v->cnt), "m" (v->cnt)
                        : "cc", "xer", "memory");
}

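/*
 * add_return/sub_return are fenced like the *_and_test helpers and
 * return the updated value.
 */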
static inline int64_t
rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
{
        long ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: ldarx %[ret],0,%[cnt]\n"
                        "add %[ret],%[inc],%[ret]\n"
                        "stdcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [inc] "r" (inc), [cnt] "r" (&v->cnt)
                        : "cc", "memory");

        return ret;
}

static inline int64_t
rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
{
        long ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: ldarx %[ret],0,%[cnt]\n"
                        "subf %[ret],%[dec],%[ret]\n"
                        "stdcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [dec] "r" (dec), [cnt] "r" (&v->cnt)
                        : "cc", "memory");

        return ret;
}

static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
{
        long ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: ldarx %[ret],0,%[cnt]\n"
                        "addic %[ret],%[ret],1\n"
                        "stdcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");

        return ret == 0;
}

static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
        long ret;

        asm volatile(
                        "\n\tlwsync\n"
                        "1: ldarx %[ret],0,%[cnt]\n"
                        "addic %[ret],%[ret],-1\n"
                        "stdcx. %[ret],0,%[cnt]\n"
                        "bne- 1b\n"
                        "isync\n"
                        : [ret] "=&r" (ret)
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");

        return ret == 0;
}

static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
        return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}

/**
 * Atomically set a 64-bit counter to 0.
 *
 * @param v
 *   A pointer to the atomic counter.
 */
static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
        v->cnt = 0;
}

static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
        return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
}

#endif /* !RTE_FORCE_INTRINSICS */

#ifdef __cplusplus
}
#endif

#endif /* _RTE_ATOMIC_PPC_64_H_ */