/* dpdk.git: drivers/common/cnxk/roc_io.h */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef _ROC_IO_H_
#define _ROC_IO_H_

#define ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id)                                  \
        do {                                                                   \
                /* 32 lines per core */                                        \
                lmt_id = plt_lcore_id() << ROC_LMT_LINES_PER_CORE_LOG2;        \
                /* Each line is 128B */                                        \
                (lmt_addr) += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2);    \
        } while (0)
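
/*
 * Usage sketch (illustrative, not part of the ROC API): reserve this
 * core's base LMT line. "lmt_base" is an assumed name for the VA of the
 * LF's LMT region.
 *
 *	uint64_t lmt_id;
 *
 *	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
 *
 * After this, lmt_base points at this core's first 128B LMT line and
 * lmt_id names that line when submitting via the LDEOR/STEOR helpers
 * below.
 */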

#define ROC_LMT_CPT_BASE_ID_GET(lmt_addr, lmt_id)                              \
        do {                                                                   \
                /* 16 lines per core */                                        \
                lmt_id = ROC_LMT_CPT_BASE_ID_OFF;                              \
                lmt_id += (plt_lcore_id() << ROC_LMT_CPT_LINES_PER_CORE_LOG2); \
                /* Each line is 128B */                                        \
                (lmt_addr) += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2);    \
        } while (0)

#define roc_load_pair(val0, val1, addr)                                        \
        ({                                                                     \
                asm volatile("ldp %x[x0], %x[x1], [%x[p1]]"                    \
                             : [x0] "=r"(val0), [x1] "=r"(val1)                \
                             : [p1] "r"(addr));                                \
        })

#define roc_store_pair(val0, val1, addr)                                       \
        ({                                                                     \
                asm volatile("stp %x[x0], %x[x1], [%x[p1]]"                    \
                             :                                                 \
                             : [x0] "r"(val0), [x1] "r"(val1), [p1] "r"(addr)  \
                             : "memory");                                      \
        })
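
/*
 * Usage sketch (illustrative): move a 16B descriptor with one LDP/STP
 * pair. "src" and "dst" are assumed 16B-aligned uint64_t pointers.
 *
 *	uint64_t w0, w1;
 *
 *	roc_load_pair(w0, w1, src);
 *	roc_store_pair(w0, w1, dst);
 */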

#define roc_prefetch_store_keep(ptr)                                           \
        ({ asm volatile("prfm pstl1keep, [%x0]\n" : : "r"(ptr)); })

#if defined(__clang__)
static __plt_always_inline void
roc_atomic128_cas_noreturn(uint64_t swap0, uint64_t swap1, int64_t *ptr)
{
        register uint64_t x0 __asm("x0") = swap0;
        register uint64_t x1 __asm("x1") = swap1;

        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "casp %[x0], %[x1], %[x0], %[x1], [%[ptr]]\n"
                     : [x0] "+r"(x0), [x1] "+r"(x1)
                     : [ptr] "r"(ptr)
                     : "memory");
}
#else
static __plt_always_inline void
roc_atomic128_cas_noreturn(uint64_t swap0, uint64_t swap1, uint64_t ptr)
{
        __uint128_t wdata = swap0 | ((__uint128_t)swap1 << 64);

        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "casp %[wdata], %H[wdata], %[wdata], %H[wdata], [%[ptr]]\n"
                     : [wdata] "+r"(wdata)
                     : [ptr] "r"(ptr)
                     : "memory");
}
#endif
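
/*
 * Note on usage (hedged): because the compare and swap registers are the
 * same pair, on ordinary memory this CASP only stores when memory already
 * holds {swap0, swap1}. The intended targets are device I/O addresses,
 * where the hardware consumes the 16B pair as an atomically posted
 * command. Illustrative call, with "io_addr" an assumed device address:
 *
 *	roc_atomic128_cas_noreturn(cmd_w0, cmd_w1, io_addr);
 */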

static __plt_always_inline uint64_t
roc_atomic64_cas(uint64_t compare, uint64_t swap, int64_t *ptr)
{
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "cas %[compare], %[swap], [%[ptr]]\n"
                     : [compare] "+r"(compare)
                     : [swap] "r"(swap), [ptr] "r"(ptr)
                     : "memory");

        return compare;
}
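
/*
 * Usage sketch (illustrative): claim a shared slot with CAS. The return
 * value is what was observed at *ptr; the CAS took effect iff it equals
 * "compare". "tag" and "slot" are assumed caller-side names.
 *
 *	uint64_t old = roc_atomic64_cas(0, tag, slot);
 *	if (old == 0)
 *		... slot was free and now holds tag ...
 */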

static __plt_always_inline uint64_t
roc_atomic64_add_nosync(int64_t incr, int64_t *ptr)
{
        uint64_t result;

        /* Atomic add with no ordering (relaxed LDADD) */
        asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldadd %x[i], %x[r], [%[b]]"
                     : [r] "=r"(result), "+m"(*ptr)
                     : [i] "r"(incr), [b] "r"(ptr)
                     : "memory");
        return result;
}

static __plt_always_inline uint64_t
roc_atomic64_add_sync(int64_t incr, int64_t *ptr)
{
        uint64_t result;

        /* Atomic add with acquire ordering (LDADDA) */
        asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldadda %x[i], %x[r], [%[b]]"
                     : [r] "=r"(result), "+m"(*ptr)
                     : [i] "r"(incr), [b] "r"(ptr)
                     : "memory");
        return result;
}
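
/*
 * Usage sketch (illustrative): fetch-and-add on a shared counter. The
 * nosync form is the relaxed variant; the sync form additionally gives
 * acquire semantics on the load.
 *
 *	int64_t cnt = 0;
 *	uint64_t prev = roc_atomic64_add_nosync(1, &cnt);
 */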

static __plt_always_inline uint64_t
roc_lmt_submit_ldeor(plt_iova_t io_address)
{
        uint64_t result;

        asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeor xzr, %x[rf], [%[rs]]"
                     : [rf] "=r"(result)
                     : [rs] "r"(io_address));
        return result;
}

static __plt_always_inline uint64_t
roc_lmt_submit_ldeorl(plt_iova_t io_address)
{
        uint64_t result;

        asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeorl xzr, %x[rf], [%[rs]]"
                     : [rf] "=r"(result)
                     : [rs] "r"(io_address));
        return result;
}

static __plt_always_inline void
roc_lmt_submit_steor(uint64_t data, plt_iova_t io_address)
{
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "steor %x[d], [%[rs]]" ::[d] "r"(data),
                     [rs] "r"(io_address));
}

static __plt_always_inline void
roc_lmt_submit_steorl(uint64_t data, plt_iova_t io_address)
{
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "steorl %x[d], [%[rs]]" ::[d] "r"(data),
                     [rs] "r"(io_address));
}

static __plt_always_inline void
roc_lmt_mov(void *out, const void *in, const uint32_t lmtext)
{
        volatile const __uint128_t *src128 = (const __uint128_t *)in;
        volatile __uint128_t *dst128 = (__uint128_t *)out;

        dst128[0] = src128[0];
        dst128[1] = src128[1];
        /* lmtext takes one of the following values:
         * 1: NIX_SUBDC_EXT needed, i.e. the Tx VLAN case
         * 2: NIX_SUBDC_EXT + NIX_SUBDC_MEM, i.e. the timestamp case
         */
        if (lmtext) {
                dst128[2] = src128[2];
                if (lmtext > 1)
                        dst128[3] = src128[3];
        }
}
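
/*
 * Typical CN9K-style submission loop (illustrative): copy the command to
 * this core's LMT line, then LDEOR to the device I/O address; a zero
 * result means the LMTST was aborted and must be retried. "lmt_addr",
 * "cmd" and "io_addr" are assumed caller-side names.
 *
 *	uint64_t lmt_status;
 *
 *	do {
 *		roc_lmt_mov(lmt_addr, cmd, 0);
 *		lmt_status = roc_lmt_submit_ldeor(io_addr);
 *	} while (lmt_status == 0);
 *
 * The STEOR/STEORL helpers post "data" without returning a status; the
 * "l" variants (LDEORL/STEORL) add release ordering so the LMT line
 * writes above are visible before the submit.
 */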

static __plt_always_inline void
roc_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
{
        volatile const __uint128_t *src128 = (const __uint128_t *)in;
        volatile __uint128_t *dst128 = (__uint128_t *)out;
        uint8_t i;

        for (i = 0; i < segdw; i++)
                dst128[i] = src128[i];
}

static __plt_always_inline void
roc_lmt_mov_one(void *out, const void *in)
{
        volatile const __uint128_t *src128 = (const __uint128_t *)in;
        volatile __uint128_t *dst128 = (__uint128_t *)out;

        *dst128 = *src128;
}

/* Non-volatile version of roc_lmt_mov_seg() */
static __plt_always_inline void
roc_lmt_mov_seg_nv(void *out, const void *in, const uint16_t segdw)
{
        const __uint128_t *src128 = (const __uint128_t *)in;
        __uint128_t *dst128 = (__uint128_t *)out;
        uint8_t i;

        for (i = 0; i < segdw; i++)
                dst128[i] = src128[i];
}

static __plt_always_inline void
roc_atf_ret(void)
{
        /* Executing WFI in EL0 raises an asynchronous exception to EL3,
         * which can optionally perform the necessary actions.
         */
        __asm("wfi");
}

#endif /* _ROC_IO_H_ */