1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
/* Compute this lcore's base LMT line id into @lmt_id and advance @lmt_addr
 * to that line: id = lcore_id << ROC_LMT_LINES_PER_CORE_LOG2 (32 lines per
 * core), byte offset = id << ROC_LMT_LINE_SIZE_LOG2 (128 B per line).
 * NOTE(review): the do { ... } while (0) wrapper lines appear to be missing
 * from this extract — confirm against the full file.
 */
8 #define ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id) \
10 /* 32 Lines per core */ \
11 lmt_id = plt_lcore_id() << ROC_LMT_LINES_PER_CORE_LOG2; \
12 /* Each line is of 128B */ \
13 (lmt_addr) += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2); \
/* Same as ROC_LMT_BASE_ID_GET but for the CPT (crypto) LMT region: the id
 * starts at ROC_LMT_CPT_BASE_ID_OFF and each core owns 16 lines
 * (ROC_LMT_CPT_LINES_PER_CORE_LOG2); lines are 128 B.
 * NOTE(review): the do { ... } while (0) wrapper lines appear to be missing
 * from this extract — confirm against the full file.
 */
16 #define ROC_LMT_CPT_BASE_ID_GET(lmt_addr, lmt_id) \
18 /* 16 Lines per core */ \
19 lmt_id = ROC_LMT_CPT_BASE_ID_OFF; \
20 lmt_id += (plt_lcore_id() << ROC_LMT_CPT_LINES_PER_CORE_LOG2); \
21 /* Each line is of 128B */ \
22 (lmt_addr) += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2); \
/* Load two 64-bit values from @addr into @val0/@val1 with a single LDP
 * instruction.
 * NOTE(review): the input-operand line for [p1] and the closing of the asm
 * statement are not visible in this extract — confirm against the full file.
 */
25 #define roc_load_pair(val0, val1, addr) \
27 asm volatile("ldp %x[x0], %x[x1], [%x[p1]]" \
28 : [x0] "=r"(val0), [x1] "=r"(val1) \
/* Store @val0/@val1 to @addr with a single STP instruction (pre-indexed
 * addressing with a #0 offset, which writes the address register back).
 * NOTE(review): the opening "asm volatile(" line appears to be missing from
 * this extract — confirm against the full file.
 */
32 #define roc_store_pair(val0, val1, addr) \
35 "stp %x[x0], %x[x1], [%x[p1], #0]!" ::[x0] "r"(val0), \
36 [x1] "r"(val1), [p1] "r"(addr)); \
/* Hint the core to prefetch the cache line at @ptr for a store, into L1,
 * with "keep" (temporal) policy — PRFM PSTL1KEEP. Purely a performance hint;
 * no architectural side effects.
 */
39 #define roc_prefetch_store_keep(ptr) \
40 ({ asm volatile("prfm pstl1keep, [%x0]\n" : : "r"(ptr)); })
42 #if defined(__clang__)
/* 128-bit compare-and-swap via the LSE CASP instruction, discarding the
 * swapped-out data (function returns void). CASP requires an even/odd
 * register pair, so the clang build pins the operands to x0/x1 explicitly.
 * NOTE(review): the opening brace, the input operand for [ptr] and the
 * clobber/close of the asm statement are not visible in this extract —
 * confirm against the full file.
 */
43 static __plt_always_inline void
44 roc_atomic128_cas_noreturn(uint64_t swap0, uint64_t swap1, int64_t *ptr)
46 register uint64_t x0 __asm("x0") = swap0;
47 register uint64_t x1 __asm("x1") = swap1;
49 asm volatile(PLT_CPU_FEATURE_PREAMBLE
50 "casp %[x0], %[x1], %[x0], %[x1], [%[ptr]]\n"
51 : [x0] "+r"(x0), [x1] "+r"(x1)
/* Non-clang variant of roc_atomic128_cas_noreturn: builds the 128-bit value
 * as a __uint128_t (swap0 in the low half, swap1 in the high half) and lets
 * the compiler allocate the even/odd pair via the %H operand modifier.
 * NOTE(review): this variant takes @ptr as uint64_t while the clang variant
 * takes int64_t * — and the #else, braces and asm operand/close lines are
 * not visible in this extract. Confirm both against the full file.
 */
56 static __plt_always_inline void
57 roc_atomic128_cas_noreturn(uint64_t swap0, uint64_t swap1, uint64_t ptr)
59 __uint128_t wdata = swap0 | ((__uint128_t)swap1 << 64);
61 asm volatile(PLT_CPU_FEATURE_PREAMBLE
62 "casp %[wdata], %H[wdata], %[wdata], %H[wdata], [%[ptr]]\n"
/* 64-bit compare-and-swap using the LSE CAS instruction (no acquire/release
 * ordering): if *ptr == @compare, *ptr becomes @swap; the value previously
 * at *ptr is written back into @compare's register ("+r" operand) and is
 * presumably what the function returns.
 * NOTE(review): the braces, clobber list and return statement are not
 * visible in this extract — confirm against the full file.
 */
69 static __plt_always_inline uint64_t
70 roc_atomic64_cas(uint64_t compare, uint64_t swap, int64_t *ptr)
72 asm volatile(PLT_CPU_FEATURE_PREAMBLE
73 "cas %[compare], %[swap], [%[ptr]]\n"
74 : [compare] "+r"(compare)
75 : [swap] "r"(swap), [ptr] "r"(ptr)
/* Same as roc_atomic64_cas but with CASL — the 'L' suffix adds release
 * ordering to the store half of the operation (Arm LSE semantics).
 * NOTE(review): the braces, clobber list and return statement are not
 * visible in this extract — confirm against the full file.
 */
81 static __plt_always_inline uint64_t
82 roc_atomic64_casl(uint64_t compare, uint64_t swap, int64_t *ptr)
84 asm volatile(PLT_CPU_FEATURE_PREAMBLE
85 "casl %[compare], %[swap], [%[ptr]]\n"
86 : [compare] "+r"(compare)
87 : [swap] "r"(swap), [ptr] "r"(ptr)
/* Atomically add @incr to *ptr with LDADD (no ordering); the value read
 * from memory before the add is captured in [r] and presumably returned.
 * The "+m"(*ptr) operand tells the compiler the pointed-to memory is
 * modified without needing a full "memory" clobber.
 * NOTE(review): the braces, declaration of `result` and return statement
 * are not visible in this extract — confirm against the full file.
 */
93 static __plt_always_inline uint64_t
94 roc_atomic64_add_nosync(int64_t incr, int64_t *ptr)
98 /* Atomic add with no ordering */
99 asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldadd %x[i], %x[r], [%[b]]"
100 : [r] "=r"(result), "+m"(*ptr)
101 : [i] "r"(incr), [b] "r"(ptr)
/* Ordered counterpart of roc_atomic64_add_nosync: LDADDA's 'A' suffix adds
 * acquire ordering to the load half of the atomic add (Arm LSE semantics).
 * NOTE(review): the braces, declaration of `result` and return statement
 * are not visible in this extract — confirm against the full file.
 */
106 static __plt_always_inline uint64_t
107 roc_atomic64_add_sync(int64_t incr, int64_t *ptr)
111 /* Atomic add with ordering */
112 asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldadda %x[i], %x[r], [%[b]]"
113 : [r] "=r"(result), "+m"(*ptr)
114 : [i] "r"(incr), [b] "r"(ptr)
/* Submit the previously written LMT line(s) by issuing LDEOR (atomic XOR
 * returning the old value) with XZR as the data source against the device
 * I/O address; the device's response lands in [rf] and is presumably what
 * the function returns. No ordering suffix — unordered submit.
 * NOTE(review): the braces, declaration/output operand for [rf] and return
 * statement are not visible in this extract — confirm against the full file.
 */
119 static __plt_always_inline uint64_t
120 roc_lmt_submit_ldeor(plt_iova_t io_address)
124 asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeor xzr, %x[rf], [%[rs]]"
126 : [rs] "r"(io_address));
/* Same as roc_lmt_submit_ldeor but with LDEORL — the 'L' suffix adds
 * release ordering, so prior stores (the LMT line contents) are visible
 * before the submit is issued.
 * NOTE(review): the braces, declaration/output operand for [rf] and return
 * statement are not visible in this extract — confirm against the full file.
 */
130 static __plt_always_inline uint64_t
131 roc_lmt_submit_ldeorl(plt_iova_t io_address)
135 asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeorl xzr,%x[rf],[%[rs]]"
137 : [rs] "r"(io_address));
/* Submit via STEOR (atomic XOR, no value returned — hence void): writes
 * @data to the device I/O address without waiting for a response. No
 * ordering suffix — unordered submit.
 * NOTE(review): the function braces are not visible in this extract —
 * confirm against the full file.
 */
141 static __plt_always_inline void
142 roc_lmt_submit_steor(uint64_t data, plt_iova_t io_address)
144 asm volatile(PLT_CPU_FEATURE_PREAMBLE
145 "steor %x[d], [%[rs]]" ::[d] "r"(data),
146 [rs] "r"(io_address));
/* Same as roc_lmt_submit_steor but with STEORL — the 'L' suffix adds
 * release ordering so prior LMT-line stores complete before the submit.
 * NOTE(review): the function braces are not visible in this extract —
 * confirm against the full file.
 */
149 static __plt_always_inline void
150 roc_lmt_submit_steorl(uint64_t data, plt_iova_t io_address)
152 asm volatile(PLT_CPU_FEATURE_PREAMBLE
153 "steorl %x[d], [%[rs]]" ::[d] "r"(data),
154 [rs] "r"(io_address));
/* Copy an LMT descriptor from @in to @out in 16-byte (128-bit) units
 * through volatile pointers (forces the stores to actually issue, as
 * required for LMT memory). Always copies the first two units; @lmtext
 * selects copying of extension unit(s) per the embedded comment.
 * NOTE(review): the function braces, the tail of the lmtext comment and the
 * `if (lmtext)` guard around the extra copy appear to be missing from this
 * extract — confirm against the full file.
 */
157 static __plt_always_inline void
158 roc_lmt_mov(void *out, const void *in, const uint32_t lmtext)
160 volatile const __uint128_t *src128 = (const __uint128_t *)in;
161 volatile __uint128_t *dst128 = (__uint128_t *)out;
163 dst128[0] = src128[0];
164 dst128[1] = src128[1];
165 /* lmtext receives following value:
166 * 1: NIX_SUBDC_EXT needed i.e. tx vlan case
169 dst128[2] = src128[2];
/* Unconditionally copy 64 bytes (four 128-bit units) from @in to @out via
 * volatile __uint128_t accesses so every store is actually emitted.
 * NOTE(review): the function braces are not visible in this extract —
 * confirm against the full file.
 */
172 static __plt_always_inline void
173 roc_lmt_mov64(void *out, const void *in)
175 volatile const __uint128_t *src128 = (const __uint128_t *)in;
176 volatile __uint128_t *dst128 = (__uint128_t *)out;
178 dst128[0] = src128[0];
179 dst128[1] = src128[1];
180 dst128[2] = src128[2];
181 dst128[3] = src128[3];
/* Non-volatile variant of roc_lmt_mov: identical copy pattern but through
 * plain __uint128_t pointers, letting the compiler merge/reorder the stores
 * where that is safe for the caller.
 * NOTE(review): the function braces, the tail of the lmtext comment and the
 * `if (lmtext)` guard around the extra copy appear to be missing from this
 * extract — confirm against the full file.
 */
184 static __plt_always_inline void
185 roc_lmt_mov_nv(void *out, const void *in, const uint32_t lmtext)
187 const __uint128_t *src128 = (const __uint128_t *)in;
188 __uint128_t *dst128 = (__uint128_t *)out;
190 dst128[0] = src128[0];
191 dst128[1] = src128[1];
192 /* lmtext receives following value:
193 * 1: NIX_SUBDC_EXT needed i.e. tx vlan case
196 dst128[2] = src128[2];
/* Copy @segdw 16-byte units from @in to @out through volatile pointers
 * (one iteration per 128-bit "dword" of the segment list).
 * NOTE(review): the function braces and the declaration of loop index `i`
 * are not visible in this extract — confirm against the full file.
 */
199 static __plt_always_inline void
200 roc_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
202 volatile const __uint128_t *src128 = (const __uint128_t *)in;
203 volatile __uint128_t *dst128 = (__uint128_t *)out;
206 for (i = 0; i < segdw; i++)
207 dst128[i] = src128[i];
/* Copy a single 16-byte unit from @in to @out through volatile pointers.
 * NOTE(review): the function braces and the copy statement itself
 * (presumably dst128[0] = src128[0]) are not visible in this extract —
 * confirm against the full file.
 */
210 static __plt_always_inline void
211 roc_lmt_mov_one(void *out, const void *in)
213 volatile const __uint128_t *src128 = (const __uint128_t *)in;
214 volatile __uint128_t *dst128 = (__uint128_t *)out;
219 /* Non volatile version of roc_lmt_mov_seg() */
/* Copies @segdw 16-byte units through plain (non-volatile) pointers so the
 * compiler is free to combine the stores.
 * NOTE(review): the function braces and the declaration of loop index `i`
 * are not visible in this extract — confirm against the full file.
 */
220 static __plt_always_inline void
221 roc_lmt_mov_seg_nv(void *out, const void *in, const uint16_t segdw)
223 const __uint128_t *src128 = (const __uint128_t *)in;
224 __uint128_t *dst128 = (__uint128_t *)out;
227 for (i = 0; i < segdw; i++)
228 dst128[i] = src128[i];
231 static __plt_always_inline void
234 /* This will allow wfi in EL0 to cause async exception to EL3
235 * which will optionally perform necessary actions.
240 #endif /* _ROC_IO_H_ */