/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_mempool.h>

#include "otx2_mempool.h"

static int
npa_lf_aura_pool_init(struct otx2_mbox *mbox, uint32_t aura_id,
		      struct npa_aura_s *aura, struct npa_pool_s *pool)
{
	struct npa_aq_enq_req *aura_init_req, *pool_init_req;
	struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	int rc, off;

	aura_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	aura_init_req->aura_id = aura_id;
	aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_init_req->op = NPA_AQ_INSTOP_INIT;
	memcpy(&aura_init_req->aura, aura, sizeof(*aura));

	pool_init_req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);

	pool_init_req->aura_id = aura_id;
	pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_init_req->op = NPA_AQ_INSTOP_INIT;
	memcpy(&pool_init_req->pool, pool, sizeof(*pool));

	otx2_mbox_msg_send(mbox, 0);
	rc = otx2_mbox_wait_for_rsp(mbox, 0);
	if (rc < 0)
		return rc;
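
	/*
	 * Two AQ requests were batched on the same mailbox, so two
	 * responses are expected back to back (hence rc == 2 below):
	 * the first sits right after the mailbox header, the second at
	 * the offset the first response's header gives in next_msgoff.
	 */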
	off = mbox->rx_start +
	      RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	aura_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
	off = mbox->rx_start + aura_init_rsp->hdr.next_msgoff;
	pool_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	if (rc == 2 && aura_init_rsp->hdr.rc == 0 && pool_init_rsp->hdr.rc == 0)
		return 0;
	else
		return NPA_LF_ERR_AURA_POOL_INIT;
}

static inline char *
npa_lf_stack_memzone_name(struct otx2_npa_lf *lf, int pool_id, char *name)
{
	snprintf(name, RTE_MEMZONE_NAMESIZE, "otx2_npa_stack_%x_%d",
		 lf->pf_func, pool_id);

	return name;
}
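
/*
 * The pool stack (the hardware's free-pointer storage) is walked by the
 * NPA block using physical addresses, so the backing memzone is
 * requested as IOVA-contiguous; its iova is programmed as stack_base.
 */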
static inline const struct rte_memzone *
npa_lf_stack_dma_alloc(struct otx2_npa_lf *lf, char *name,
		       int pool_id, size_t size)
{
	return rte_memzone_reserve_aligned(
		npa_lf_stack_memzone_name(lf, pool_id, name), size, 0,
		RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
}

static inline int
bitmap_ctzll(uint64_t slab)
{
	/* __builtin_ctzll() is undefined for a zero input, guard it */
	if (slab == 0)
		return 0;

	return __builtin_ctzll(slab);
}
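
/*
 * Reserve an aura-pool pair: pick a free index from the LF's resource
 * bitmap, allocate IOVA-contiguous stack memory for the pool, fill in
 * the aura and pool contexts, then commit both with AQ INIT ops.
 */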
static int
npa_lf_aura_pool_pair_alloc(struct otx2_npa_lf *lf, const uint32_t block_size,
			    const uint32_t block_count, struct npa_aura_s *aura,
			    struct npa_pool_s *pool, uint64_t *aura_handle)
{
	int rc, aura_id, pool_id, stack_size, alloc_size;
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	uint64_t slab;
	uint32_t pos;

	/* Sanity check */
	if (!lf || !block_size || !block_count ||
	    !pool || !aura || !aura_handle)
		return NPA_LF_ERR_PARAM;

	/* Block size should be cache line aligned and in range of 128B-128KB */
	if (block_size % OTX2_ALIGN || block_size < 128 ||
	    block_size > 128 * 1024)
		return NPA_LF_ERR_INVALID_BLOCK_SZ;

	pos = 0;
	slab = 0;
	/* Scan from the beginning */
	__rte_bitmap_scan_init(lf->npa_bmp);
	/* Scan bitmap to get the free pool */
	rc = rte_bitmap_scan(lf->npa_bmp, &pos, &slab);
	/* Empty bitmap */
	if (rc == 0) {
		otx2_err("Mempools exhausted, use 'max_pools' devargs to increase");
		return -ERANGE;
	}

	/* Get aura_id from resource bitmap */
	aura_id = pos + bitmap_ctzll(slab);
	/* Mark pool as reserved */
	rte_bitmap_clear(lf->npa_bmp, aura_id);

	/* Each aura is configured with its own dedicated pool (aura-pool pair) */
	pool_id = aura_id;
	rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools || aura_id >=
	      (int)BIT_ULL(6 + lf->aura_sz)) ? NPA_LF_ERR_AURA_ID_ALLOC : 0;
	if (rc)
		goto exit;
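
	/*
	 * Allocate stack memory: the stack holds one free-buffer pointer
	 * per block and is organized as fixed-size pages of
	 * lf->stack_pg_ptrs entries each, so round the page count up.
	 */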
	stack_size = (block_count + lf->stack_pg_ptrs - 1) / lf->stack_pg_ptrs;
	alloc_size = stack_size * lf->stack_pg_bytes;

	mz = npa_lf_stack_dma_alloc(lf, name, pool_id, alloc_size);
	if (mz == NULL) {
		rc = -ENOMEM;
		goto aura_res_put;
	}

	/* Update aura fields */
	aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
	aura->ena = 1;
	aura->shift = __builtin_clz(block_count) - 8;
	aura->limit = block_count;
	aura->pool_caching = 1;
	aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
	/* Many to one reduction */
	aura->err_qint_idx = aura_id % lf->qints;

	/* Update pool fields */
	pool->stack_base = mz->iova;
	pool->ena = 1;
	/* buf_size is expressed in units of OTX2_ALIGN (128B) cache lines */
	pool->buf_size = block_size / OTX2_ALIGN;
	pool->stack_max_pages = stack_size;
	pool->shift = __builtin_clz(block_count) - 8;
	/* Accept buffer pointers from the entire address space */
	pool->ptr_start = 0;
	pool->ptr_end = ~0;
	pool->stack_caching = 1;
	pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);

	/* Many to one reduction */
	pool->err_qint_idx = pool_id % lf->qints;

	/* Issue AURA_INIT and POOL_INIT op */
	rc = npa_lf_aura_pool_init(lf->mbox, aura_id, aura, pool);
	if (rc)
		goto stack_mem_free;
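
	/*
	 * The handle packs the aura_id together with the LF I/O base so
	 * that fast-path alloc/free ops can address this aura directly.
	 */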
	*aura_handle = npa_lf_aura_handle_gen(aura_id, lf->base);

	/* Update aura count */
	npa_lf_aura_op_cnt_set(*aura_handle, 0, block_count);
	/* Read it back to make sure aura count is updated */
	npa_lf_aura_op_cnt_get(*aura_handle);

	return 0;

	/* Unwind in reverse order of acquisition */
stack_mem_free:
	rte_memzone_free(mz);
aura_res_put:
	rte_bitmap_set(lf->npa_bmp, aura_id);
exit:
	return rc;
}
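
/*
 * rte_mempool_ops alloc callback: carve an aura-pool pair out of the
 * NPA LF and stash the resulting aura handle in mp->pool_id.
 */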
static int
otx2_npa_alloc(struct rte_mempool *mp)
{
	uint32_t block_size, block_count;
	struct otx2_npa_lf *lf;
	struct npa_aura_s aura;
	struct npa_pool_s pool;
	uint64_t aura_handle;
	int rc;

	lf = otx2_npa_lf_obj_get();
	if (lf == NULL) {
		rc = -EINVAL;
		goto error;
	}
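
	/* Every mempool object carries header + element + trailer bytes */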
	block_size = mp->elt_size + mp->header_size + mp->trailer_size;
	block_count = mp->size;

	if (block_size % OTX2_ALIGN != 0) {
		otx2_err("Block size should be multiple of 128B");
		rc = -ERANGE;
		goto error;
	}

	memset(&aura, 0, sizeof(struct npa_aura_s));
	memset(&pool, 0, sizeof(struct npa_pool_s));
	pool.nat_align = 1;
	/* buf_offset is in OTX2_ALIGN units; one line holds the mp header */
	pool.buf_offset = 1;

	if ((uint32_t)pool.buf_offset * OTX2_ALIGN != mp->header_size) {
		otx2_err("Unsupported mp->header_size=%d", mp->header_size);
		rc = -EINVAL;
		goto error;
	}

	/* Use driver specific mp->pool_config to override aura config */
	if (mp->pool_config != NULL)
		memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));

	rc = npa_lf_aura_pool_pair_alloc(lf, block_size, block_count,
					 &aura, &pool, &aura_handle);
	if (rc) {
		otx2_err("Failed to alloc pool or aura rc=%d", rc);
		goto error;
	}

	/* Store aura_handle for future queue operations */
	mp->pool_id = aura_handle;
	otx2_npa_dbg("lf=%p block_sz=%d block_count=%d aura_handle=0x%"PRIx64,
		     lf, block_size, block_count, aura_handle);

	/* Just hold the reference of the object */
	otx2_npa_lf_obj_ref();

	return 0;

error:
	return rc;
}
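
/*
 * Register the NPA ops with the mempool library at constructor time;
 * applications select them by name via rte_mempool_set_ops_byname().
 */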
static struct rte_mempool_ops otx2_npa_ops = {
	.name = "octeontx2_npa",
	.alloc = otx2_npa_alloc,
};

MEMPOOL_REGISTER_OPS(otx2_npa_ops);