(lmt_addr) += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2); \
} while (0)
+/* Compute this core's first CPT LMT line and advance lmt_addr to it.
+ * Lines for CPT start at ROC_LMT_CPT_BASE_ID_OFF and each core owns a
+ * contiguous group of lines; lmt_id receives the absolute line index
+ * and lmt_addr is advanced by lmt_id 128B lines.
+ * NOTE(review): lmt_id is assigned without parentheses, so callers
+ * must pass a plain lvalue, not an expression.
+ */
+#define ROC_LMT_CPT_BASE_ID_GET(lmt_addr, lmt_id) \
+ do { \
+ /* 16 Lines per core */ \
+ lmt_id = ROC_LMT_CPT_BASE_ID_OFF; \
+ lmt_id += (plt_lcore_id() << ROC_LMT_CPT_LINES_PER_CORE_LOG2); \
+ /* Each line is of 128B */ \
+ (lmt_addr) += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2); \
+ } while (0)
+
#define roc_load_pair(val0, val1, addr) \
({ \
asm volatile("ldp %x[x0], %x[x1], [%x[p1]]" \
return compare;
}
+/* 64-bit atomic compare-and-swap on *ptr using the arm64 CASL
+ * instruction (store-release ordering, no acquire semantics):
+ * if *ptr == compare then *ptr = swap.  Returns the value that was
+ * in *ptr before the operation (equal to compare on success).
+ */
+static __plt_always_inline uint64_t
+roc_atomic64_casl(uint64_t compare, uint64_t swap, int64_t *ptr)
+{
+ asm volatile(PLT_CPU_FEATURE_PREAMBLE
+ "casl %[compare], %[swap], [%[ptr]]\n"
+ : [compare] "+r"(compare)
+ : [swap] "r"(swap), [ptr] "r"(ptr)
+ : "memory");
+
+ return compare;
+}
+
static __plt_always_inline uint64_t
roc_atomic64_add_nosync(int64_t incr, int64_t *ptr)
{
dst128[1] = src128[1];
/* lmtext receives following value:
* 1: NIX_SUBDC_EXT needed i.e. tx vlan case
- * 2: NIX_SUBDC_EXT + NIX_SUBDC_MEM i.e. tstamp case
*/
- if (lmtext) {
+ if (lmtext)
+ dst128[2] = src128[2];
+}
+
+/* Copy one full 64B LMT line from in to out as four 128-bit moves.
+ * volatile keeps the compiler from eliding or reordering the stores.
+ * Assumes both pointers are 16B-aligned -- TODO confirm at call sites.
+ */
+static __plt_always_inline void
+roc_lmt_mov64(void *out, const void *in)
+{
+ volatile const __uint128_t *src128 = (const __uint128_t *)in;
+ volatile __uint128_t *dst128 = (__uint128_t *)out;
+
+ dst128[0] = src128[0];
+ dst128[1] = src128[1];
+ dst128[2] = src128[2];
+ dst128[3] = src128[3];
+}
+
+/* Copy an LMT descriptor (2x16B always) from in to out; when lmtext
+ * is non-zero also copy the third 16B word (NIX_SUBDC_EXT, e.g. the
+ * tx vlan case).  "_nv" variant: plain (non-volatile) accesses --
+ * presumably the volatile counterpart exists elsewhere; verify.
+ */
+static __plt_always_inline void
+roc_lmt_mov_nv(void *out, const void *in, const uint32_t lmtext)
+{
+ const __uint128_t *src128 = (const __uint128_t *)in;
+ __uint128_t *dst128 = (__uint128_t *)out;
+
+ dst128[0] = src128[0];
+ dst128[1] = src128[1];
+ /* lmtext receives following value:
+ * 1: NIX_SUBDC_EXT needed i.e. tx vlan case
+ */
+ if (lmtext)
+ dst128[2] = src128[2];
- if (lmtext > 1)
- dst128[3] = src128[3];
- }
}
static __plt_always_inline void