/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef _ROC_IO_H_
#define _ROC_IO_H_

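/* Resolve the calling core's base LMT line: derive the core's first LMT line
 * id from the lcore id and advance lmt_addr by the matching byte offset
 * (each core owns 32 consecutive 128B lines).
 *
 * Illustrative flow only; dev_lmt_base, cmd, submit_word and io_addr are
 * assumed to come from device setup, and the exact submit word encoding is
 * device specific:
 *
 *	uint64_t lmt_base = dev_lmt_base;
 *	uint16_t lmt_id;
 *
 *	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
 *	roc_lmt_mov_one((void *)lmt_base, cmd);      // fill one 128B LMT line
 *	roc_lmt_submit_steorl(submit_word, io_addr); // trigger the LMTST
 */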
#define ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id)                                  \
        do {                                                                   \
                /* 32 Lines per core */                                        \
                lmt_id = plt_lcore_id() << ROC_LMT_LINES_PER_CORE_LOG2;        \
                /* Each line is 128B */                                        \
                (lmt_addr) += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2);    \
        } while (0)

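/* Load two consecutive 64-bit words from addr into val0 and val1 with a
 * single LDP.
 */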
#define roc_load_pair(val0, val1, addr)                                        \
        ({                                                                     \
                asm volatile("ldp %x[x0], %x[x1], [%x[p1]]"                    \
                             : [x0] "=r"(val0), [x1] "=r"(val1)                \
                             : [p1] "r"(addr));                                \
        })

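/* Store val0 and val1 as two consecutive 64-bit words at addr with a single
 * STP.
 */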
#define roc_store_pair(val0, val1, addr)                                       \
        ({                                                                     \
                asm volatile(                                                  \
                        "stp %x[x0], %x[x1], [%x[p1], #0]!" ::[x0] "r"(val0),  \
                        [x1] "r"(val1), [p1] "r"(addr));                       \
        })

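/* Prefetch the cache line at ptr for a store and keep it in L1
 * (PRFM PSTL1KEEP).
 */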
#define roc_prefetch_store_keep(ptr)                                           \
        ({ asm volatile("prfm pstl1keep, [%x0]\n" : : "r"(ptr)); })

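/* 128-bit atomic compare-and-swap (CASP) issued with {swap0, swap1} as both
 * the compare and the swap value; whatever CASP returns in the register pair
 * is discarded. The clang variant pins the data to the x0/x1 register pair
 * explicitly, the GCC variant lets the compiler pick a pair via a __uint128_t
 * operand.
 */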
#if defined(__clang__)
static __plt_always_inline void
roc_atomic128_cas_noreturn(uint64_t swap0, uint64_t swap1, int64_t *ptr)
{
        register uint64_t x0 __asm("x0") = swap0;
        register uint64_t x1 __asm("x1") = swap1;

        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "casp %[x0], %[x1], %[x0], %[x1], [%[ptr]]\n"
                     : [x0] "+r"(x0), [x1] "+r"(x1)
                     : [ptr] "r"(ptr)
                     : "memory");
}
#else
static __plt_always_inline void
roc_atomic128_cas_noreturn(uint64_t swap0, uint64_t swap1, uint64_t ptr)
{
        __uint128_t wdata = swap0 | ((__uint128_t)swap1 << 64);

        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "casp %[wdata], %H[wdata], %[wdata], %H[wdata], [%[ptr]]\n"
                     : [wdata] "+r"(wdata)
                     : [ptr] "r"(ptr)
                     : "memory");
}
#endif

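/* 64-bit atomic compare-and-swap (CAS): write swap to *ptr if *ptr equals
 * compare, and return the value observed at *ptr.
 */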
static __plt_always_inline uint64_t
roc_atomic64_cas(uint64_t compare, uint64_t swap, int64_t *ptr)
{
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "cas %[compare], %[swap], [%[ptr]]\n"
                     : [compare] "+r"(compare)
                     : [swap] "r"(swap), [ptr] "r"(ptr)
                     : "memory");

        return compare;
}

static __plt_always_inline uint64_t
roc_atomic64_add_nosync(int64_t incr, int64_t *ptr)
{
        uint64_t result;

        /* Atomic add with no ordering */
        asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldadd %x[i], %x[r], [%[b]]"
                     : [r] "=r"(result), "+m"(*ptr)
                     : [i] "r"(incr), [b] "r"(ptr)
                     : "memory");
        return result;
}

static __plt_always_inline uint64_t
roc_atomic64_add_sync(int64_t incr, int64_t *ptr)
{
        uint64_t result;

        /* Atomic add with acquire ordering */
        asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldadda %x[i], %x[r], [%[b]]"
                     : [r] "=r"(result), "+m"(*ptr)
                     : [i] "r"(incr), [b] "r"(ptr)
                     : "memory");
        return result;
}

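/* Submit the pending LMTST to io_address using LDEOR (atomic EOR with xzr,
 * i.e. zero) and return the value read back from io_address.
 */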
static __plt_always_inline uint64_t
roc_lmt_submit_ldeor(plt_iova_t io_address)
{
        uint64_t result;

        asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeor xzr, %x[rf], [%[rs]]"
                     : [rf] "=r"(result)
                     : [rs] "r"(io_address));
        return result;
}

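/* Same as roc_lmt_submit_ldeor() but with release semantics (LDEORL), so
 * prior stores (the LMT lines) are ordered before the submission.
 */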
static __plt_always_inline uint64_t
roc_lmt_submit_ldeorl(plt_iova_t io_address)
{
        uint64_t result;

        asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeorl xzr, %x[rf], [%[rs]]"
                     : [rf] "=r"(result)
                     : [rs] "r"(io_address));
        return result;
}

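/* Submit the pending LMTST by atomically EORing data into io_address (STEOR);
 * nothing is returned.
 */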
static __plt_always_inline void
roc_lmt_submit_steor(uint64_t data, plt_iova_t io_address)
{
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "steor %x[d], [%[rs]]" ::[d] "r"(data),
                     [rs] "r"(io_address));
}

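/* Same as roc_lmt_submit_steor() but with release semantics (STEORL). */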
static __plt_always_inline void
roc_lmt_submit_steorl(uint64_t data, plt_iova_t io_address)
{
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "steorl %x[d], [%[rs]]" ::[d] "r"(data),
                     [rs] "r"(io_address));
}

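/* Copy an LMT line from in to out: always the first two 16B words, plus the
 * extension/memory sub-descriptor words when lmtext is non-zero (see the
 * comment below).
 */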
static __plt_always_inline void
roc_lmt_mov(void *out, const void *in, const uint32_t lmtext)
{
        volatile const __uint128_t *src128 = (const __uint128_t *)in;
        volatile __uint128_t *dst128 = (__uint128_t *)out;

        dst128[0] = src128[0];
        dst128[1] = src128[1];
        /* lmtext receives the following values:
         * 1: NIX_SUBDC_EXT needed, i.e. Tx VLAN case
         * 2: NIX_SUBDC_EXT + NIX_SUBDC_MEM, i.e. timestamp case
         */
        if (lmtext) {
                dst128[2] = src128[2];
                if (lmtext > 1)
                        dst128[3] = src128[3];
        }
}

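/* Copy segdw 16B words from in to out through volatile accesses. */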
static __plt_always_inline void
roc_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
{
        volatile const __uint128_t *src128 = (const __uint128_t *)in;
        volatile __uint128_t *dst128 = (__uint128_t *)out;
        uint8_t i;

        for (i = 0; i < segdw; i++)
                dst128[i] = src128[i];
}

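/* Copy a single 16B word from in to out through volatile accesses. */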
static __plt_always_inline void
roc_lmt_mov_one(void *out, const void *in)
{
        volatile const __uint128_t *src128 = (const __uint128_t *)in;
        volatile __uint128_t *dst128 = (__uint128_t *)out;

        *dst128 = *src128;
}

/* Non-volatile version of roc_lmt_mov_seg() */
static __plt_always_inline void
roc_lmt_mov_seg_nv(void *out, const void *in, const uint16_t segdw)
{
        const __uint128_t *src128 = (const __uint128_t *)in;
        __uint128_t *dst128 = (__uint128_t *)out;
        uint8_t i;

        for (i = 0; i < segdw; i++)
                dst128[i] = src128[i];
}

static __plt_always_inline void
roc_atf_ret(void)
{
        /* This allows WFI in EL0 to cause an async exception to EL3,
         * which will optionally perform the necessary actions.
         */
        __asm("wfi");
}

#endif /* _ROC_IO_H_ */