#define otx2_prefetch_store_keep(ptr) ({\
	asm volatile("prfm pstl1keep, [%x0]\n" : : "r" (ptr)); })

+#if defined(__ARM_FEATURE_SVE)
+#define __LSE_PREAMBLE " .cpu generic+lse+sve\n"
+#else
+#define __LSE_PREAMBLE " .cpu generic+lse\n"
+#endif
+
static __rte_always_inline uint64_t
otx2_atomic64_add_nosync(int64_t incr, int64_t *ptr)
{
@@ ... @@ otx2_atomic64_add_nosync(int64_t incr, int64_t *ptr)
	/* Atomic add with no ordering */
	asm volatile (
-		".cpu generic+lse\n"
+		__LSE_PREAMBLE
		"ldadd %x[i], %x[r], [%[b]]"
		: [r] "=r" (result), "+m" (*ptr)
		: [i] "r" (incr), [b] "r" (ptr)
@@ ... @@ otx2_atomic64_add_sync(int64_t incr, int64_t *ptr)
	/* Atomic add with ordering */
	asm volatile (
-		".cpu generic+lse\n"
+		__LSE_PREAMBLE
		"ldadda %x[i], %x[r], [%[b]]"
		: [r] "=r" (result), "+m" (*ptr)
		: [i] "r" (incr), [b] "r" (ptr)
@@ ... @@ otx2_lmt_submit(rte_iova_t io_address)
	uint64_t result;

	asm volatile (
-		".cpu generic+lse\n"
+		__LSE_PREAMBLE
		"ldeor xzr,%x[rf],[%[rs]]" :
		 [rf] "=r"(result): [rs] "r"(io_address));
	return result;
@@ ... @@ otx2_lmt_submit_release(rte_iova_t io_address)
	uint64_t result;

	asm volatile (
-		".cpu generic+lse\n"
+		__LSE_PREAMBLE
		"ldeorl xzr,%x[rf],[%[rs]]" :
		 [rf] "=r"(result) : [rs] "r"(io_address));
	return result;
@@ ... @@ otx2_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
		dst128[i] = src128[i];
}

+#undef __LSE_PREAMBLE
#endif /* _OTX2_IO_ARM64_H_ */
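
For reference, below is a minimal standalone sketch of the pattern this patch applies; the helper name lse_atomic64_add_nosync and the LSE_PREAMBLE macro are invented for the example and are not part of the driver. The reason for the conditional is that a ".cpu" directive emitted from inline asm retargets the assembler for the rest of the generated file, so when the unit is built with an SVE-enabled -march the directive has to keep "+sve" or later SVE instructions can fail to assemble.

#include <stdint.h>

/*
 * Illustrative sketch only (not part of the patch): the same conditional
 * preamble, applied to a plain LSE atomic add. Requires an AArch64
 * toolchain, e.g. gcc -march=armv8.2-a or -march=armv8.2-a+sve.
 */
#if defined(__ARM_FEATURE_SVE)
#define LSE_PREAMBLE " .cpu generic+lse+sve\n"
#else
#define LSE_PREAMBLE " .cpu generic+lse\n"
#endif

static inline uint64_t
lse_atomic64_add_nosync(int64_t incr, int64_t *ptr)
{
	uint64_t result;

	/* LSE LDADD: atomically add incr to *ptr and return the old value. */
	asm volatile (
		LSE_PREAMBLE
		"ldadd %x[i], %x[r], [%[b]]"
		: [r] "=r" (result), "+m" (*ptr)
		: [i] "r" (incr), [b] "r" (ptr)
		: "memory");
	return result;
}

#undef LSE_PREAMBLE

The trailing #undef mirrors the patch: the preamble macro stays local to the header and does not leak into files that include it.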