/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_mempool.h>

#include "otx2_mempool.h"
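
/* mempool get_count callback: report how many buffers are currently
 * available in the NPA aura backing this mempool (mp->pool_id holds the
 * aura handle).
 */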
static unsigned int
otx2_npa_get_count(const struct rte_mempool *mp)
{
	return (unsigned int)npa_lf_aura_op_available(mp->pool_id);
}
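
/* Enqueue AURA_INIT and POOL_INIT admin-queue requests as one mailbox batch
 * and require both responses to report success.
 */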
static int
npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
		      struct npa_aura_s *aura, struct npa_pool_s *pool)
{
	struct npa_aq_enq_req *aura_init_req, *pool_init_req;
	struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	int rc, off;

	aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	aura_init_req->aura_id = aura_id;
	aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_init_req->op = NPA_AQ_INSTOP_INIT;
	memcpy(&aura_init_req->aura, aura, sizeof(*aura));

	pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	pool_init_req->aura_id = aura_id;
	pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_init_req->op = NPA_AQ_INSTOP_INIT;
	memcpy(&pool_init_req->pool, pool, sizeof(*pool));

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_wait_for_rsp(mbox, 0);
	if (rc < 0)
		return rc;

	/* Responses are laid out back to back in the mailbox region */
	off = mbox->rx_start +
		RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	aura_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
	off = mbox->rx_start + aura_init_rsp->hdr.next_msgoff;
	pool_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	/* Both messages must have been answered and both must succeed */
	if (rc == 2 && aura_init_rsp->hdr.rc == 0 && pool_init_rsp->hdr.rc == 0)
		return 0;
	else
		return NPA_LF_ERR_AURA_POOL_INIT;
}
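
/* Tear down an aura/pool pair: issue an alloc op on the aura, disable the
 * pool and aura contexts via AQ WRITE requests, then sync the NDC-NPA cache
 * for this LF.
 */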
static int
npa_lf_aura_pool_fini(struct otx2_mbox *mbox,
		      uint32_t aura_id,
		      uint64_t aura_handle)
{
	struct npa_aq_enq_req *aura_req, *pool_req;
	struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct ndc_sync_op *ndc_req;
	int rc, off;

	/* Procedure for disabling an aura/pool */
	npa_lf_aura_op_alloc(aura_handle, 0);

	pool_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
	pool_req->aura_id = aura_id;
	pool_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_req->op = NPA_AQ_INSTOP_WRITE;
	pool_req->pool.ena = 0;
	pool_req->pool_mask.ena = ~pool_req->pool_mask.ena;

	aura_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
	aura_req->aura_id = aura_id;
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;
	aura_req->aura.ena = 0;
	aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_wait_for_rsp(mbox, 0);
	if (rc < 0)
		return rc;

	off = mbox->rx_start +
		RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	pool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	off = mbox->rx_start + pool_rsp->hdr.next_msgoff;
	aura_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	if (rc != 2 || aura_rsp->hdr.rc != 0 || pool_rsp->hdr.rc != 0)
		return NPA_LF_ERR_AURA_POOL_FINI;

	/* Sync NDC-NPA for LF */
	ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
	ndc_req->npa_lf_sync = 1;

	rc = otx2_mbox_process(mbox);
	if (rc) {
		otx2_err("Error on NDC-NPA LF sync, rc %d", rc);
		return NPA_LF_ERR_AURA_POOL_FINI;
	}

	return 0;
}
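
/* Build the per-pool stack memzone name from the PF function and pool id. */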
static inline char *
npa_lf_stack_memzone_name(struct otx2_npa_lf *lf, int pool_id, char *name)
{
	snprintf(name, RTE_MEMZONE_NAMESIZE, "otx2_npa_stack_%x_%d",
		 lf->pf_func, pool_id);

	return name;
}
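
/* Reserve an IOVA-contiguous, cache-line aligned memzone that backs the
 * pool's stack pages (the pointer stack used by the pool context below).
 */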
static inline const struct rte_memzone *
npa_lf_stack_dma_alloc(struct otx2_npa_lf *lf, char *name,
		       int pool_id, size_t size)
{
	return rte_memzone_reserve_aligned(
		npa_lf_stack_memzone_name(lf, pool_id, name), size, 0,
		RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
}
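
/* Look up and free the stack memzone reserved for the given pool id. */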
static inline int
npa_lf_stack_dma_free(struct otx2_npa_lf *lf, char *name, int pool_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(npa_lf_stack_memzone_name(lf, pool_id, name));
	if (mz == NULL)
		return -ENOENT;

	return rte_memzone_free(mz);
}
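
/* Index of the first set bit in a bitmap slab (trailing zero count). */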
static inline int
bitmap_ctzll(uint64_t slab)
{
	if (slab == 0)
		return 0;

	return __builtin_ctzll(slab);
}
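
/* Reserve a free aura/pool id from the resource bitmap, allocate stack
 * memory, fill the aura and pool contexts and issue the INIT ops. On
 * success *aura_handle is set and the aura count is primed with block_count.
 */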
static int
npa_lf_aura_pool_pair_alloc(struct otx2_npa_lf *lf, const uint32_t block_size,
			    const uint32_t block_count, struct npa_aura_s *aura,
			    struct npa_pool_s *pool, uint64_t *aura_handle)
{
	int rc, aura_id, pool_id, stack_size, alloc_size;
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	uint64_t slab;
	uint32_t pos;

	if (!lf || !block_size || !block_count ||
	    !pool || !aura || !aura_handle)
		return NPA_LF_ERR_PARAM;

	/* Block size should be cache line aligned and in range of 128B-128KB */
	if (block_size % OTX2_ALIGN || block_size < 128 ||
	    block_size > 128 * 1024)
		return NPA_LF_ERR_INVALID_BLOCK_SZ;

	/* Scan from the beginning */
	__rte_bitmap_scan_init(lf->npa_bmp);
	/* Scan bitmap to get the free pool */
	rc = rte_bitmap_scan(lf->npa_bmp, &pos, &slab);
	/* Empty bitmap */
	if (rc == 0) {
		otx2_err("Mempools exhausted, 'max_pools' devargs to increase");
		return -ERANGE;
	}

	/* Get aura_id from resource bitmap */
	aura_id = pos + bitmap_ctzll(slab);
	/* Mark pool as reserved */
	rte_bitmap_clear(lf->npa_bmp, aura_id);

	/* Configuration based on each aura has separate pool(aura-pool pair) */
	pool_id = aura_id;
	rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools || aura_id >=
	      (int)BIT_ULL(6 + lf->aura_sz)) ? NPA_LF_ERR_AURA_ID_ALLOC : 0;
	if (rc)
		goto exit;

	/* Allocate stack memory */
	stack_size = (block_count + lf->stack_pg_ptrs - 1) / lf->stack_pg_ptrs;
	alloc_size = stack_size * lf->stack_pg_bytes;

	mz = npa_lf_stack_dma_alloc(lf, name, pool_id, alloc_size);
	if (mz == NULL) {
		rc = -ENOMEM;
		goto exit;
	}

	/* Update aura fields */
	aura->pool_addr = pool_id;/* AF will translate to associated poolctx */
	aura->ena = 1;
	aura->shift = __builtin_clz(block_count) - 8;
	aura->limit = block_count;
	aura->pool_caching = 1;
	aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
	/* Many to one reduction */
	aura->err_qint_idx = aura_id % lf->qints;

	/* Update pool fields */
	pool->stack_base = mz->iova;
	pool->ena = 1;
	pool->buf_size = block_size / OTX2_ALIGN;
	pool->stack_max_pages = stack_size;
	pool->shift = __builtin_clz(block_count) - 8;
	pool->stack_caching = 1;
	pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);

	/* Many to one reduction */
	pool->err_qint_idx = pool_id % lf->qints;

	/* Issue AURA_INIT and POOL_INIT op */
	rc = npa_lf_aura_pool_init(lf->mbox, aura_id, aura, pool);
	if (rc)
		goto stack_mem_free;

	*aura_handle = npa_lf_aura_handle_gen(aura_id, lf->base);

	/* Update aura count */
	npa_lf_aura_op_cnt_set(*aura_handle, 0, block_count);
	/* Read it back to make sure aura count is updated */
	npa_lf_aura_op_cnt_get(*aura_handle);

	return 0;

stack_mem_free:
	rte_memzone_free(mz);
exit:
	/* Put the reserved aura_id back into the free bitmap */
	rte_bitmap_set(lf->npa_bmp, aura_id);

	return rc;
}
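
/* Release an aura/pool pair: disable both contexts, free the stack memzone
 * and return the id to the resource bitmap.
 */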
static int
npa_lf_aura_pool_pair_free(struct otx2_npa_lf *lf, uint64_t aura_handle)
{
	char name[RTE_MEMZONE_NAMESIZE];
	int aura_id, pool_id, rc;

	if (!lf || !aura_handle)
		return NPA_LF_ERR_PARAM;

	aura_id = pool_id = npa_lf_aura_handle_to_aura(aura_handle);
	rc = npa_lf_aura_pool_fini(lf->mbox, aura_id, aura_handle);
	rc |= npa_lf_stack_dma_free(lf, name, pool_id);

	/* Mark the aura_id as free again in the resource bitmap */
	rte_bitmap_set(lf->npa_bmp, aura_id);

	return rc;
}
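
/* mempool alloc callback: derive block size/count from the mempool, build
 * the aura and pool contexts and create the backing aura/pool pair.
 */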
static int
otx2_npa_alloc(struct rte_mempool *mp)
{
	uint32_t block_size, block_count;
	struct otx2_npa_lf *lf;
	struct npa_aura_s aura;
	struct npa_pool_s pool;
	uint64_t aura_handle;
	int rc;

	lf = otx2_npa_lf_obj_get();
	if (lf == NULL)
		return -EINVAL;

	block_size = mp->elt_size + mp->header_size + mp->trailer_size;
	block_count = mp->size;

	if (block_size % OTX2_ALIGN != 0) {
		otx2_err("Block size should be multiple of 128B");
		return -ERANGE;
	}

	memset(&aura, 0, sizeof(struct npa_aura_s));
	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;
	pool.buf_offset = 1;

	if ((uint32_t)pool.buf_offset * OTX2_ALIGN != mp->header_size) {
		otx2_err("Unsupported mp->header_size=%d", mp->header_size);
		return -EINVAL;
	}

	/* Use driver specific mp->pool_config to override aura config */
	if (mp->pool_config != NULL)
		memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));

	rc = npa_lf_aura_pool_pair_alloc(lf, block_size, block_count,
					 &aura, &pool, &aura_handle);
	if (rc) {
		otx2_err("Failed to alloc pool or aura rc=%d", rc);
		return rc;
	}

	/* Store aura_handle for future queue operations */
	mp->pool_id = aura_handle;
	otx2_npa_dbg("lf=%p block_sz=%d block_count=%d aura_handle=0x%"PRIx64,
		     lf, block_size, block_count, aura_handle);

	/* Just hold the reference of the object */
	otx2_npa_lf_obj_ref();

	return 0;
}
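
/* mempool free callback: destroy the aura/pool pair and drop the NPA LF
 * reference taken in otx2_npa_alloc().
 */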
static void
otx2_npa_free(struct rte_mempool *mp)
{
	struct otx2_npa_lf *lf = otx2_npa_lf_obj_get();
	int rc = 0;

	otx2_npa_dbg("lf=%p aura_handle=0x%"PRIx64, lf, mp->pool_id);
	if (lf != NULL)
		rc = npa_lf_aura_pool_pair_free(lf, mp->pool_id);

	if (rc)
		otx2_err("Failed to free pool or aura rc=%d", rc);

	/* Release the reference of npalf */
	otx2_npa_lf_fini();
}
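
/* mempool calc_mem_size callback: request space for one extra object so the
 * populate step can realign the start address, and require the object area
 * to be one physically contiguous chunk.
 */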
static ssize_t
otx2_npa_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
		       uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
{
	ssize_t mem_size;

	/*
	 * Simply need space for one more object to be able to
	 * fulfill alignment requirements.
	 */
	mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num + 1,
							pg_shift,
							min_chunk_size, align);
	if (mem_size < 0)
		return mem_size;

	/*
	 * Memory area which contains objects must be physically
	 * contiguous.
	 */
	*min_chunk_size = mem_size;

	return mem_size;
}
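
/* mempool populate callback: align the object area to a multiple of the
 * total element size, program the aura's buffer address range and populate
 * the objects using the default helper.
 */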
static int
otx2_npa_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
		  rte_iova_t iova, size_t len,
		  rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	size_t total_elt_sz;
	size_t off;

	if (iova == RTE_BAD_IOVA)
		return -EINVAL;

	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

	/* Align object start address to a multiple of total_elt_sz */
	off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);

	if (len < off)
		return -EINVAL;

	vaddr = (char *)vaddr + off;
	iova += off;
	len -= off;

	/* Tell NPA the valid buffer address range for this aura */
	npa_lf_aura_op_range_set(mp->pool_id, iova, iova + len);

	return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova, len,
						obj_cb, obj_cb_arg);
}
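
/* Ops table registered as "octeontx2_npa"; applications select it with
 * rte_mempool_set_ops_byname().
 */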
static struct rte_mempool_ops otx2_npa_ops = {
	.name = "octeontx2_npa",
	.alloc = otx2_npa_alloc,
	.free = otx2_npa_free,
	.get_count = otx2_npa_get_count,
	.calc_mem_size = otx2_npa_calc_mem_size,
	.populate = otx2_npa_populate,
};

MEMPOOL_REGISTER_OPS(otx2_npa_ops);