common/cnxk: support waiting for pool filling
[dpdk.git] / drivers / common / cnxk / roc_io.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #ifndef _ROC_IO_H_
6 #define _ROC_IO_H_
7
/* Compute this core's base LMT line id and advance the LMT region address
 * to that line.
 *
 * lmt_addr: in/out - base address of the LMT region; advanced to the first
 *           line owned by the calling lcore.
 * lmt_id:   out    - first LMT line id owned by the calling lcore.
 *
 * Both arguments are parenthesized on use so the macro stays safe when a
 * caller passes an expression rather than a plain identifier.
 */
#define ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id)                                  \
	do {                                                                   \
		/* 32 Lines per core */                                        \
		(lmt_id) = plt_lcore_id() << ROC_LMT_LINES_PER_CORE_LOG2;      \
		/* Each line is of 128B */                                     \
		(lmt_addr) += ((uint64_t)(lmt_id) << ROC_LMT_LINE_SIZE_LOG2);  \
	} while (0)
15
/* Compute this core's base LMT line id within the CPT LMT window and
 * advance the LMT region address to that line.
 *
 * lmt_addr: in/out - base address of the LMT region; advanced to the first
 *           CPT line owned by the calling lcore.
 * lmt_id:   out    - first CPT LMT line id owned by the calling lcore.
 *
 * Both arguments are parenthesized on use so the macro stays safe when a
 * caller passes an expression rather than a plain identifier.
 */
#define ROC_LMT_CPT_BASE_ID_GET(lmt_addr, lmt_id)                              \
	do {                                                                   \
		/* 16 Lines per core */                                        \
		(lmt_id) = ROC_LMT_CPT_BASE_ID_OFF;                            \
		(lmt_id) += (plt_lcore_id() << ROC_LMT_CPT_LINES_PER_CORE_LOG2); \
		/* Each line is of 128B */                                     \
		(lmt_addr) += ((uint64_t)(lmt_id) << ROC_LMT_LINE_SIZE_LOG2);  \
	} while (0)
24
/* Load two consecutive 64-bit words from @addr into @val0 and @val1 with a
 * single LDP instruction.
 *
 * NOTE(review): the asm has no "memory" clobber or "m" input operand, so the
 * compiler is free to reorder it against other memory accesses - confirm
 * callers do not depend on ordering here.
 */
#define roc_load_pair(val0, val1, addr)                                        \
	({                                                                     \
		asm volatile("ldp %x[x0], %x[x1], [%x[p1]]"                    \
			     : [x0] "=r"(val0), [x1] "=r"(val1)                \
			     : [p1] "r"(addr));                                \
	})
31
/* Store @val0 and @val1 to two consecutive 64-bit words at @addr with a
 * single STP instruction.
 *
 * NOTE(review): the "[%x[p1], #0]!" pre-index writeback form writes the
 * base register back (with an unchanged value, offset 0) even though the
 * operand is declared input-only - looks benign, but confirm this is
 * intentional.
 */
#define roc_store_pair(val0, val1, addr)                                       \
	({                                                                     \
		asm volatile(                                                  \
			"stp %x[x0], %x[x1], [%x[p1], #0]!" ::[x0] "r"(val0),  \
			[x1] "r"(val1), [p1] "r"(addr));                       \
	})
38
/* Prefetch the line at @ptr for store into L1 with temporal locality
 * (PRFM PSTL1KEEP).
 */
#define roc_prefetch_store_keep(ptr)                                           \
	({ asm volatile("prfm pstl1keep, [%x0]\n" : : "r"(ptr)); })
41
#if defined(__clang__)
/* Issue a 128-bit CASP at *ptr with swap0 (low) / swap1 (high) used as both
 * the compare and the swap pair; the result is discarded.
 *
 * Clang build: CASP requires an even/odd consecutive register pair, which
 * is pinned explicitly to x0/x1 via register-asm variables.
 *
 * NOTE(review): this variant takes int64_t *ptr while the non-clang variant
 * below takes uint64_t ptr - callers must cast differently per compiler;
 * confirm the asymmetry is intentional.
 */
static __plt_always_inline void
roc_atomic128_cas_noreturn(uint64_t swap0, uint64_t swap1, int64_t *ptr)
{
	register uint64_t x0 __asm("x0") = swap0;
	register uint64_t x1 __asm("x1") = swap1;

	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "casp %[x0], %[x1], %[x0], %[x1], [%[ptr]]\n"
		     : [x0] "+r"(x0), [x1] "+r"(x1)
		     : [ptr] "r"(ptr)
		     : "memory");
}
#else
/* Issue a 128-bit CASP at the address in ptr with swap0 (low) / swap1
 * (high) used as both the compare and the swap pair; result discarded.
 *
 * GCC build: a single __uint128_t operand maps onto an even/odd register
 * pair; the %H modifier selects the high register of that pair.
 */
static __plt_always_inline void
roc_atomic128_cas_noreturn(uint64_t swap0, uint64_t swap1, uint64_t ptr)
{
	__uint128_t wdata = swap0 | ((__uint128_t)swap1 << 64);

	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "casp %[wdata], %H[wdata], %[wdata], %H[wdata], [%[ptr]]\n"
		     : [wdata] "+r"(wdata)
		     : [ptr] "r"(ptr)
		     : "memory");
}
#endif
68
/* 64-bit compare-and-swap with no ordering (CAS).
 *
 * Atomically: if *ptr == compare, store swap to *ptr.
 * Returns the value read from *ptr (equals @compare on success).
 */
static __plt_always_inline uint64_t
roc_atomic64_cas(uint64_t compare, uint64_t swap, int64_t *ptr)
{
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "cas %[compare], %[swap], [%[ptr]]\n"
		     : [compare] "+r"(compare)
		     : [swap] "r"(swap), [ptr] "r"(ptr)
		     : "memory");

	return compare;
}
80
/* 64-bit compare-and-swap with release ordering (CASL).
 *
 * Atomically: if *ptr == compare, store swap to *ptr.
 * Returns the value read from *ptr (equals @compare on success).
 */
static __plt_always_inline uint64_t
roc_atomic64_casl(uint64_t compare, uint64_t swap, int64_t *ptr)
{
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "casl %[compare], %[swap], [%[ptr]]\n"
		     : [compare] "+r"(compare)
		     : [swap] "r"(swap), [ptr] "r"(ptr)
		     : "memory");

	return compare;
}
92
/* Atomically add @incr to *ptr with no ordering (LDADD).
 *
 * Returns the value of *ptr prior to the addition.
 */
static __plt_always_inline uint64_t
roc_atomic64_add_nosync(int64_t incr, int64_t *ptr)
{
	uint64_t result;

	/* Atomic add with no ordering */
	asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldadd %x[i], %x[r], [%[b]]"
		     : [r] "=r"(result), "+m"(*ptr)
		     : [i] "r"(incr), [b] "r"(ptr)
		     : "memory");
	return result;
}
105
/* Atomically add @incr to *ptr with acquire ordering (LDADDA).
 *
 * Returns the value of *ptr prior to the addition.
 */
static __plt_always_inline uint64_t
roc_atomic64_add_sync(int64_t incr, int64_t *ptr)
{
	uint64_t result;

	/* Atomic add with ordering */
	asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldadda %x[i], %x[r], [%[b]]"
		     : [r] "=r"(result), "+m"(*ptr)
		     : [i] "r"(incr), [b] "r"(ptr)
		     : "memory");
	return result;
}
118
/* Submit pending LMTST data via an atomic LDEOR (no ordering) to the
 * device IO address and return the 64-bit response word.
 *
 * XOR with xzr leaves the target value unchanged; it is the atomic access
 * to @io_address itself that triggers the submission.
 */
static __plt_always_inline uint64_t
roc_lmt_submit_ldeor(plt_iova_t io_address)
{
	uint64_t result;

	asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeor xzr, %x[rf], [%[rs]]"
		     : [rf] "=r"(result)
		     : [rs] "r"(io_address));
	return result;
}
129
130 static __plt_always_inline uint64_t
131 roc_lmt_submit_ldeorl(plt_iova_t io_address)
132 {
133         uint64_t result;
134
135         asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeorl xzr,%x[rf],[%[rs]]"
136                      : [rf] "=r"(result)
137                      : [rs] "r"(io_address));
138         return result;
139 }
140
/* Submit pending LMTST with @data via an atomic STEOR (no ordering, no
 * result) to the device IO address.
 */
static __plt_always_inline void
roc_lmt_submit_steor(uint64_t data, plt_iova_t io_address)
{
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "steor %x[d], [%[rs]]" ::[d] "r"(data),
		     [rs] "r"(io_address));
}
148
/* Submit pending LMTST with @data via an atomic STEORL (release ordering,
 * no result) to the device IO address.
 */
static __plt_always_inline void
roc_lmt_submit_steorl(uint64_t data, plt_iova_t io_address)
{
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "steorl %x[d], [%[rs]]" ::[d] "r"(data),
		     [rs] "r"(io_address));
}
156
157 static __plt_always_inline void
158 roc_lmt_mov(void *out, const void *in, const uint32_t lmtext)
159 {
160         volatile const __uint128_t *src128 = (const __uint128_t *)in;
161         volatile __uint128_t *dst128 = (__uint128_t *)out;
162
163         dst128[0] = src128[0];
164         dst128[1] = src128[1];
165         /* lmtext receives following value:
166          * 1: NIX_SUBDC_EXT needed i.e. tx vlan case
167          * 2: NIX_SUBDC_EXT + NIX_SUBDC_MEM i.e. tstamp case
168          */
169         if (lmtext) {
170                 dst128[2] = src128[2];
171                 if (lmtext > 1)
172                         dst128[3] = src128[3];
173         }
174 }
175
176 static __plt_always_inline void
177 roc_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
178 {
179         volatile const __uint128_t *src128 = (const __uint128_t *)in;
180         volatile __uint128_t *dst128 = (__uint128_t *)out;
181         uint8_t i;
182
183         for (i = 0; i < segdw; i++)
184                 dst128[i] = src128[i];
185 }
186
187 static __plt_always_inline void
188 roc_lmt_mov_one(void *out, const void *in)
189 {
190         volatile const __uint128_t *src128 = (const __uint128_t *)in;
191         volatile __uint128_t *dst128 = (__uint128_t *)out;
192
193         *dst128 = *src128;
194 }
195
196 /* Non volatile version of roc_lmt_mov_seg() */
197 static __plt_always_inline void
198 roc_lmt_mov_seg_nv(void *out, const void *in, const uint16_t segdw)
199 {
200         const __uint128_t *src128 = (const __uint128_t *)in;
201         __uint128_t *dst128 = (__uint128_t *)out;
202         uint8_t i;
203
204         for (i = 0; i < segdw; i++)
205                 dst128[i] = src128[i];
206 }
207
/* Park the calling context with a WFI instruction. */
static __plt_always_inline void
roc_atf_ret(void)
{
	/* This will allow wfi in EL0 to cause async exception to EL3
	 * which will optionally perform necessary actions.
	 */
	__asm("wfi");
}
216
217 #endif /* _ROC_IO_H_ */