1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
8 #define ROC_AURA_ID_MASK (BIT_ULL(16) - 1)
9 #define ROC_AURA_OP_LIMIT_MASK (BIT_ULL(36) - 1)
12 * Generate 64bit handle to have optimized alloc and free aura operation.
13 * 0 - ROC_AURA_ID_MASK for storing the aura_id.
14 * [ROC_AURA_ID_MASK+1, (2^64 - 1)] for storing the lf base address.
15 * This scheme is valid when OS can give ROC_AURA_ID_MASK
16 * aligned address for lf base address.
18 static inline uint64_t
19 roc_npa_aura_handle_gen(uint32_t aura_id, uintptr_t addr)
23 val = aura_id & ROC_AURA_ID_MASK;
24 return (uint64_t)addr | val;
27 static inline uint64_t
28 roc_npa_aura_handle_to_aura(uint64_t aura_handle)
30 return aura_handle & ROC_AURA_ID_MASK;
33 static inline uintptr_t
34 roc_npa_aura_handle_to_base(uint64_t aura_handle)
36 return (uintptr_t)(aura_handle & ~ROC_AURA_ID_MASK);
/* Pop one pointer from the aura behind @aura_handle via the
 * NPA_LF_AURA_OP_ALLOCX(0) register. @drop non-zero sets the DROP
 * bit (63) in the issued word. Returns the raw value read back from
 * the hardware op.
 */
static inline uint64_t
roc_npa_aura_op_alloc(uint64_t aura_handle, const int drop)
{
	uint64_t wdata = roc_npa_aura_handle_to_aura(aura_handle);
	int64_t *addr;

	if (drop)
		wdata |= BIT_ULL(63); /* DROP */

	addr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +
			   NPA_LF_AURA_OP_ALLOCX(0));
	return roc_atomic64_add_nosync(wdata, addr);
}
54 roc_npa_aura_op_free(uint64_t aura_handle, const int fabs, uint64_t iova)
56 uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
58 roc_npa_aura_handle_to_base(aura_handle) + NPA_LF_AURA_OP_FREE0;
60 reg |= BIT_ULL(63); /* FABS */
62 roc_store_pair(iova, reg, addr);
65 static inline uint64_t
66 roc_npa_aura_op_cnt_get(uint64_t aura_handle)
72 wdata = roc_npa_aura_handle_to_aura(aura_handle) << 44;
73 addr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +
75 reg = roc_atomic64_add_nosync(wdata, addr);
77 if (reg & BIT_ULL(42) /* OP_ERR */)
80 return reg & 0xFFFFFFFFF;
84 roc_npa_aura_op_cnt_set(uint64_t aura_handle, const int sign, uint64_t count)
86 uint64_t reg = count & (BIT_ULL(36) - 1);
89 reg |= BIT_ULL(43); /* CNT_ADD */
91 reg |= (roc_npa_aura_handle_to_aura(aura_handle) << 44);
93 plt_write64(reg, roc_npa_aura_handle_to_base(aura_handle) +
97 static inline uint64_t
98 roc_npa_aura_op_limit_get(uint64_t aura_handle)
104 wdata = roc_npa_aura_handle_to_aura(aura_handle) << 44;
105 addr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +
106 NPA_LF_AURA_OP_LIMIT);
107 reg = roc_atomic64_add_nosync(wdata, addr);
109 if (reg & BIT_ULL(42) /* OP_ERR */)
112 return reg & ROC_AURA_OP_LIMIT_MASK;
116 roc_npa_aura_op_limit_set(uint64_t aura_handle, uint64_t limit)
118 uint64_t reg = limit & ROC_AURA_OP_LIMIT_MASK;
120 reg |= (roc_npa_aura_handle_to_aura(aura_handle) << 44);
122 plt_write64(reg, roc_npa_aura_handle_to_base(aura_handle) +
123 NPA_LF_AURA_OP_LIMIT);
126 static inline uint64_t
127 roc_npa_aura_op_available(uint64_t aura_handle)
133 wdata = roc_npa_aura_handle_to_aura(aura_handle) << 44;
134 addr = (int64_t *)(roc_npa_aura_handle_to_base(aura_handle) +
135 NPA_LF_POOL_OP_AVAILABLE);
136 reg = roc_atomic64_add_nosync(wdata, addr);
138 if (reg & BIT_ULL(42) /* OP_ERR */)
141 return reg & 0xFFFFFFFFF;
145 struct plt_pci_device *pci_dev;
147 #define ROC_NPA_MEM_SZ (1 * 1024)
148 uint8_t reserved[ROC_NPA_MEM_SZ] __plt_cache_aligned;
149 } __plt_cache_aligned;
151 int __roc_api roc_npa_dev_init(struct roc_npa *roc_npa);
152 int __roc_api roc_npa_dev_fini(struct roc_npa *roc_npa);
155 int __roc_api roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
156 uint32_t block_count, struct npa_aura_s *aura,
157 struct npa_pool_s *pool);
158 int __roc_api roc_npa_aura_limit_modify(uint64_t aura_handle,
159 uint16_t aura_limit);
160 int __roc_api roc_npa_pool_destroy(uint64_t aura_handle);
161 int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
162 void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
167 int __roc_api roc_npa_ctx_dump(void);
168 int __roc_api roc_npa_dump(void);
170 #endif /* _ROC_NPA_H_ */