/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */
5 #ifndef __OCTEONTX_IO_H__
6 #define __OCTEONTX_IO_H__
/* In Cavium OCTEON TX SoC, all accesses to the device registers are
 * implicitly strongly ordered. So, the relaxed version of IO operation is
 * safe to use without any IO memory barriers.
 */
#define octeontx_read64 rte_read64_relaxed
#define octeontx_write64 rte_write64_relaxed
/* ARM64 specific functions */
#if defined(RTE_ARCH_ARM64)

/* Prefetch for store, retain in L1 (PRFM PSTL1KEEP). */
#define octeontx_prefetch_store_keep(_ptr) ({\
	asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })

/* Single 128-bit load of two adjacent 64-bit words (LDP). */
#define octeontx_load_pair(val0, val1, addr) ({		\
			asm volatile(			\
			"ldp %x[x0], %x[x1], [%x[p1]]"	\
			:[x0]"=r"(val0), [x1]"=r"(val1) \
			:[p1]"r"(addr)			\
			); })

/* Single 128-bit store of two adjacent 64-bit words (STP). */
#define octeontx_store_pair(val0, val1, addr) ({	\
			asm volatile(			\
			"stp %x[x0], %x[x1], [%x[p1]]"	\
			::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
			); })

#else /* Unoptimized functions for building on non arm64 arch */

#define octeontx_prefetch_store_keep(_ptr) do {} while (0)

/* Fallback: two ordered 64-bit reads instead of one LDP. */
#define octeontx_load_pair(val0, val1, addr)		\
do {							\
	val0 = rte_read64(addr);			\
	val1 = rte_read64(((uint8_t *)addr) + 8);	\
} while (0)

/* Fallback: two ordered 64-bit writes instead of one STP. */
#define octeontx_store_pair(val0, val1, addr)		\
do {							\
	rte_write64(val0, addr);			\
	rte_write64(val1, (((uint8_t *)addr) + 8));	\
} while (0)

#endif /* RTE_ARCH_ARM64 */
54 #if defined(RTE_ARCH_ARM64)
55 #if defined(__ARM_FEATURE_SVE)
56 #define __LSE_PREAMBLE " .cpu generic+lse+sve\n"
58 #define __LSE_PREAMBLE " .cpu generic+lse\n"
61 * Perform an atomic fetch-and-add operation.
63 static inline uint64_t
64 octeontx_reg_ldadd_u64(void *addr, int64_t off)
70 " ldadd %1, %0, [%2]\n"
71 : "=r" (old_val) : "r" (off), "r" (addr) : "memory");
77 * Perform a LMTST operation - an atomic write of up to 128 byte to
78 * an I/O block that supports this operation type.
80 * @param lmtline_va is the address where LMTLINE is mapped
81 * @param ioreg_va is the virtual address of the device register
82 * @param cmdbuf is the array of peripheral commands to execute
83 * @param cmdsize is the number of 64-bit words in 'cmdbuf'
88 octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[],
93 uint64_t *lmtline = lmtline_va;
98 /* Copy commands to LMTLINE */
99 for (result = 0; result < word_count; result += 2) {
100 lmtline[result + 0] = cmdbuf[result + 0];
101 lmtline[result + 1] = cmdbuf[result + 1];
104 /* LDEOR initiates atomic transfer to I/O device */
107 " ldeor xzr, %0, [%1]\n"
108 : "=r" (result) : "r" (ioreg_va) : "memory");
112 #undef __LSE_PREAMBLE
115 static inline uint64_t
116 octeontx_reg_ldadd_u64(void *addr, int64_t off)
124 octeontx_reg_lmtst(void *lmtline_va, void *ioreg_va, const uint64_t cmdbuf[],
127 RTE_SET_USED(lmtline_va);
128 RTE_SET_USED(ioreg_va);
129 RTE_SET_USED(cmdbuf);
130 RTE_SET_USED(cmdsize);
134 #endif /* __OCTEONTX_IO_H__ */