/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

void
roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
			  uint64_t end_iova)
{
	const uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +
			       NPA_LF_POOL_OP_PTR_START0;
	const uint64_t end = roc_npa_aura_handle_to_base(aura_handle) +
			     NPA_LF_POOL_OP_PTR_END0;
	uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
	struct npa_lf *lf = idev_npa_obj_get();
	struct npa_aura_lim *lim;

	PLT_ASSERT(lf);
	lim = lf->aura_lim;

	/* Widen the cached limits, then mirror them to hardware */
	lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
	lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);

	roc_store_pair(lim[reg].ptr_start, reg, start);
	roc_store_pair(lim[reg].ptr_end, reg, end);
}
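
/*
 * Usage sketch (illustrative only, not part of the driver): once a pool's
 * buffer memory is known, the caller publishes the IOVA span so NPA can
 * range-check pointers. The memory-region names below are hypothetical.
 *
 *	uint64_t base = mem_iova;
 *	uint64_t end = mem_iova + mem_len;
 *
 *	roc_npa_aura_op_range_set(aura_handle, base, end);
 *
 * Repeated calls only ever widen the tracked window, since the limits are
 * accumulated with PLT_MIN()/PLT_MAX() above.
 */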

static int
npa_aura_pool_init(struct mbox *mbox, uint32_t aura_id, struct npa_aura_s *aura,
		   struct npa_pool_s *pool)
{
	struct npa_aq_enq_req *aura_init_req, *pool_init_req;
	struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
	struct mbox_dev *mdev = &mbox->dev[0];
	int rc = -ENOSPC, off;

	aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (aura_init_req == NULL)
		return rc;
	aura_init_req->aura_id = aura_id;
	aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_init_req->op = NPA_AQ_INSTOP_INIT;
	mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));

	pool_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (pool_init_req == NULL)
		return rc;
	pool_init_req->aura_id = aura_id;
	pool_init_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_init_req->op = NPA_AQ_INSTOP_INIT;
	mbox_memcpy(&pool_init_req->pool, pool, sizeof(*pool));

	rc = mbox_process(mbox);
	if (rc < 0)
		return rc;

	/* Both responses land back to back in the mbox receive region */
	off = mbox->rx_start +
	      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	aura_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);
	off = mbox->rx_start + aura_init_rsp->hdr.next_msgoff;
	pool_init_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	if (aura_init_rsp->hdr.rc == 0 && pool_init_rsp->hdr.rc == 0)
		return 0;
	else
		return NPA_ERR_AURA_POOL_INIT;
}

static int
npa_aura_pool_fini(struct mbox *mbox, uint32_t aura_id, uint64_t aura_handle)
{
	struct npa_aq_enq_req *aura_req, *pool_req;
	struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
	struct mbox_dev *mdev = &mbox->dev[0];
	struct ndc_sync_op *ndc_req;
	int rc = -ENOSPC, off;
	uint64_t ptr;

	/* Procedure for disabling an aura/pool */
	plt_delay_us(10);

	/* Clear all the pointers from the aura */
	do {
		ptr = roc_npa_aura_op_alloc(aura_handle, 0);
	} while (ptr);

	pool_req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (pool_req == NULL)
		return rc;
	pool_req->aura_id = aura_id;
	pool_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_req->op = NPA_AQ_INSTOP_WRITE;
	pool_req->pool.ena = 0;
	pool_req->pool_mask.ena = ~pool_req->pool_mask.ena;

	aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (aura_req == NULL)
		return rc;
	aura_req->aura_id = aura_id;
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;
	aura_req->aura.ena = 0;
	aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;

	rc = mbox_process(mbox);
	if (rc < 0)
		return rc;

	off = mbox->rx_start +
	      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	pool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	off = mbox->rx_start + pool_rsp->hdr.next_msgoff;
	aura_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	if (aura_rsp->hdr.rc != 0 || pool_rsp->hdr.rc != 0)
		return NPA_ERR_AURA_POOL_FINI;

	/* Sync NDC-NPA for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL)
		return -ENOSPC;
	ndc_req->npa_lf_sync = 1;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("Error on NDC-NPA LF sync, rc %d", rc);
		return NPA_ERR_AURA_POOL_FINI;
	}
	return 0;
}

int
roc_npa_pool_op_pc_reset(uint64_t aura_handle)
{
	struct npa_lf *lf = idev_npa_obj_get();
	struct npa_aq_enq_req *pool_req;
	struct npa_aq_enq_rsp *pool_rsp;
	struct ndc_sync_op *ndc_req;
	struct mbox_dev *mdev;
	int rc = -ENOSPC, off;
	struct mbox *mbox;

	if (lf == NULL)
		return NPA_ERR_PARAM;

	mbox = lf->mbox;
	mdev = &mbox->dev[0];
	plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);

	pool_req = mbox_alloc_msg_npa_aq_enq(mbox);
	if (pool_req == NULL)
		return rc;
	pool_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	pool_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_req->op = NPA_AQ_INSTOP_WRITE;
	pool_req->pool.op_pc = 0;
	pool_req->pool_mask.op_pc = ~pool_req->pool_mask.op_pc;

	rc = mbox_process(mbox);
	if (rc < 0)
		return rc;

	off = mbox->rx_start +
	      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	pool_rsp = (struct npa_aq_enq_rsp *)((uintptr_t)mdev->mbase + off);

	if (pool_rsp->hdr.rc != 0)
		return NPA_ERR_AURA_POOL_FINI;

	/* Sync NDC-NPA for LF */
	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
	if (ndc_req == NULL)
		return -ENOSPC;
	ndc_req->npa_lf_sync = 1;
	rc = mbox_process(mbox);
	if (rc) {
		plt_err("Error on NDC-NPA LF sync, rc %d", rc);
		return NPA_ERR_AURA_POOL_FINI;
	}

	return 0;
}

static inline char *
npa_stack_memzone_name(struct npa_lf *lf, int pool_id, char *name)
{
	snprintf(name, PLT_MEMZONE_NAMESIZE, "roc_npa_stack_%x_%d", lf->pf_func,
		 pool_id);
	return name;
}

static inline const struct plt_memzone *
npa_stack_dma_alloc(struct npa_lf *lf, char *name, int pool_id, size_t size)
{
	const char *mz_name = npa_stack_memzone_name(lf, pool_id, name);

	return plt_memzone_reserve_cache_align(mz_name, size);
}

static inline int
npa_stack_dma_free(struct npa_lf *lf, char *name, int pool_id)
{
	const struct plt_memzone *mz;

	mz = plt_memzone_lookup(npa_stack_memzone_name(lf, pool_id, name));
	if (mz == NULL)
		return NPA_ERR_PARAM;

	return plt_memzone_free(mz);
}

static inline int
bitmap_ctzll(uint64_t slab)
{
	if (slab == 0)
		return 0;

	return __builtin_ctzll(slab);
}

static int
npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
			 const uint32_t block_count, struct npa_aura_s *aura,
			 struct npa_pool_s *pool, uint64_t *aura_handle)
{
	int rc, aura_id, pool_id, stack_size, alloc_size;
	char name[PLT_MEMZONE_NAMESIZE];
	const struct plt_memzone *mz;
	uint64_t slab;
	uint32_t pos;

	/* Sanity check */
	if (!lf || !block_size || !block_count || !pool || !aura ||
	    !aura_handle)
		return NPA_ERR_PARAM;

	/* Block size should be cache line aligned and in range of 128B-128KB */
	if (block_size % ROC_ALIGN || block_size < 128 ||
	    block_size > ROC_NPA_MAX_BLOCK_SZ)
		return NPA_ERR_INVALID_BLOCK_SZ;

	pos = 0;
	slab = 0;
	/* Scan from the beginning */
	plt_bitmap_scan_init(lf->npa_bmp);
	/* Scan bitmap to get the free pool */
	rc = plt_bitmap_scan(lf->npa_bmp, &pos, &slab);
	/* Empty bitmap */
	if (rc == 0) {
		plt_err("Mempools exhausted");
		return NPA_ERR_AURA_ID_ALLOC;
	}

	/* Get aura_id from resource bitmap */
	aura_id = pos + bitmap_ctzll(slab);
	/* Mark pool as reserved */
	plt_bitmap_clear(lf->npa_bmp, aura_id);

	/* Configuration based on each aura has separate pool(aura-pool pair) */
	pool_id = aura_id;
	rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||
	      aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?
			   NPA_ERR_AURA_ID_ALLOC :
			   0;
	if (rc)
		goto exit;

	/* Allocate stack memory */
	stack_size = (block_count + lf->stack_pg_ptrs - 1) / lf->stack_pg_ptrs;
	alloc_size = stack_size * lf->stack_pg_bytes;

	mz = npa_stack_dma_alloc(lf, name, pool_id, alloc_size);
	if (mz == NULL) {
		rc = NPA_ERR_ALLOC;
		goto aura_res_put;
	}

	/* Update aura fields */
	aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
	aura->ena = 1;
	aura->shift = __builtin_clz(block_count) - 8;
	aura->limit = block_count;
	aura->pool_caching = 1;
	aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
	/* Many to one reduction */
	aura->err_qint_idx = aura_id % lf->qints;

	/* Update pool fields */
	pool->stack_base = mz->iova;
	pool->ena = 1;
	pool->buf_size = block_size / ROC_ALIGN;
	pool->stack_max_pages = stack_size;
	pool->shift = __builtin_clz(block_count) - 8;
	pool->ptr_start = 0;
	pool->ptr_end = ~0;
	pool->stack_caching = 1;
	pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);

	/* Many to one reduction */
	pool->err_qint_idx = pool_id % lf->qints;

	/* Issue AURA_INIT and POOL_INIT op */
	rc = npa_aura_pool_init(lf->mbox, aura_id, aura, pool);
	if (rc)
		goto stack_mem_free;

	*aura_handle = roc_npa_aura_handle_gen(aura_id, lf->base);
	/* Update aura count */
	roc_npa_aura_op_cnt_set(*aura_handle, 0, block_count);
	/* Read it back to make sure aura count is updated */
	roc_npa_aura_op_cnt_get(*aura_handle);

	return 0;

stack_mem_free:
	plt_memzone_free(mz);
aura_res_put:
	plt_bitmap_set(lf->npa_bmp, aura_id);
exit:
	return rc;
}
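
/*
 * Worked example of the sizing math above (illustrative numbers): with
 * block_count = 4096 and hardware reporting lf->stack_pg_ptrs = 32
 * pointers per stack page, stack_size = (4096 + 31) / 32 = 128 pages.
 * The shift field is derived from the 32-bit leading-zero count, e.g.
 * __builtin_clz(4096) = 19, so shift = 19 - 8 = 11.
 */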

int
roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
		    uint32_t block_count, struct npa_aura_s *aura,
		    struct npa_pool_s *pool)
{
	struct npa_aura_s defaura;
	struct npa_pool_s defpool;
	struct idev_cfg *idev;
	struct npa_lf *lf;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL) {
		rc = NPA_ERR_DEVICE_NOT_BOUNDED;
		goto error;
	}

	idev = idev_get_cfg();
	if (idev == NULL) {
		rc = NPA_ERR_ALLOC;
		goto error;
	}

	if (aura == NULL) {
		memset(&defaura, 0, sizeof(struct npa_aura_s));
		aura = &defaura;
	}
	if (pool == NULL) {
		memset(&defpool, 0, sizeof(struct npa_pool_s));
		defpool.nat_align = 1;
		defpool.buf_offset = 1;
		pool = &defpool;
	}

	rc = npa_aura_pool_pair_alloc(lf, block_size, block_count, aura, pool,
				      aura_handle);
	if (rc) {
		plt_err("Failed to alloc pool or aura rc=%d", rc);
		goto error;
	}

	plt_npa_dbg("lf=%p block_sz=%d block_count=%d aura_handle=0x%" PRIx64,
		    lf, block_size, block_count, *aura_handle);

	/* Just hold the reference of the object */
	__atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
	return 0;

error:
	return rc;
}
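
/*
 * Example (sketch of typical usage, not taken from this file): create a
 * pool of 1024 buffers of 2048 bytes with the default aura/pool contexts
 * and destroy it again. Error handling is trimmed for brevity.
 *
 *	uint64_t aura_handle = 0;
 *	int rc;
 *
 *	rc = roc_npa_pool_create(&aura_handle, 2048, 1024, NULL, NULL);
 *	if (rc == 0)
 *		rc = roc_npa_pool_destroy(aura_handle);
 *
 * Passing NULL for aura/pool selects the zeroed defaults built above
 * (defpool.nat_align = 1, defpool.buf_offset = 1).
 */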

int
roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit)
{
	struct npa_aq_enq_req *aura_req;
	struct npa_lf *lf;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL)
		return NPA_ERR_DEVICE_NOT_BOUNDED;

	aura_req = mbox_alloc_msg_npa_aq_enq(lf->mbox);
	if (aura_req == NULL)
		return -ENOMEM;
	aura_req->aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;

	aura_req->aura.limit = aura_limit;
	aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
	rc = mbox_process(lf->mbox);

	return rc;
}

static int
npa_aura_pool_pair_free(struct npa_lf *lf, uint64_t aura_handle)
{
	char name[PLT_MEMZONE_NAMESIZE];
	int aura_id, pool_id, rc;

	if (!lf || !aura_handle)
		return NPA_ERR_PARAM;

	aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	pool_id = aura_id;
	rc = npa_aura_pool_fini(lf->mbox, aura_id, aura_handle);
	rc |= npa_stack_dma_free(lf, name, pool_id);

	plt_bitmap_set(lf->npa_bmp, aura_id);

	return rc;
}

int
roc_npa_pool_destroy(uint64_t aura_handle)
{
	struct npa_lf *lf = idev_npa_obj_get();
	int rc = 0;

	plt_npa_dbg("lf=%p aura_handle=0x%" PRIx64, lf, aura_handle);
	rc = npa_aura_pool_pair_free(lf, aura_handle);
	if (rc)
		plt_err("Failed to destroy pool or aura rc=%d", rc);

	/* Release the reference of npa */
	rc |= npa_lf_fini();
	return rc;
}

int
roc_npa_pool_range_update_check(uint64_t aura_handle)
{
	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
	struct npa_lf *lf;
	struct npa_aura_lim *lim;
	__io struct npa_pool_s *pool;
	struct npa_aq_enq_req *req;
	struct npa_aq_enq_rsp *rsp;
	int rc;

	lf = idev_npa_obj_get();
	if (lf == NULL)
		return NPA_ERR_PARAM;

	lim = lf->aura_lim;

	req = mbox_alloc_msg_npa_aq_enq(lf->mbox);
	if (req == NULL)
		return -ENOSPC;

	req->aura_id = aura_id;
	req->ctype = NPA_AQ_CTYPE_POOL;
	req->op = NPA_AQ_INSTOP_READ;

	rc = mbox_process_msg(lf->mbox, (void *)&rsp);
	if (rc) {
		plt_err("Failed to get pool(0x%" PRIx64 ") context", aura_id);
		return rc;
	}

	pool = &rsp->pool;
	if (lim[aura_id].ptr_start != pool->ptr_start ||
	    lim[aura_id].ptr_end != pool->ptr_end) {
		plt_err("Range update failed on pool(0x%" PRIx64 ")", aura_id);
		return NPA_ERR_PARAM;
	}

	return 0;
}
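
/*
 * The range helpers are meant to be used together; a hypothetical caller
 * flow (sketch only):
 *
 *	roc_npa_aura_op_range_set(aura_handle, base_iova, end_iova);
 *	if (roc_npa_pool_range_update_check(aura_handle) != 0)
 *		plt_err("pool range mismatch");
 *
 * The check reads the POOL context back over the mailbox and compares it
 * against the limits cached in lf->aura_lim[].
 */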

static int
npa_attach(struct mbox *mbox)
{
	struct rsrc_attach_req *req;

	req = mbox_alloc_msg_attach_resources(mbox);
	if (req == NULL)
		return -ENOSPC;
	req->modify = true;
	req->npalf = true;

	return mbox_process(mbox);
}

static int
npa_detach(struct mbox *mbox)
{
	struct rsrc_detach_req *req;

	req = mbox_alloc_msg_detach_resources(mbox);
	if (req == NULL)
		return -ENOSPC;
	req->partial = true;
	req->npalf = true;

	return mbox_process(mbox);
}

static int
npa_get_msix_offset(struct mbox *mbox, uint16_t *npa_msixoff)
{
	struct msix_offset_rsp *msix_rsp;
	int rc;

	/* Get NPA MSIX vector offsets */
	mbox_alloc_msg_msix_offset(mbox);
	rc = mbox_process_msg(mbox, (void *)&msix_rsp);
	if (rc == 0)
		*npa_msixoff = msix_rsp->npa_msixoff;

	return rc;
}

static int
npa_lf_alloc(struct npa_lf *lf)
{
	struct mbox *mbox = lf->mbox;
	struct npa_lf_alloc_req *req;
	struct npa_lf_alloc_rsp *rsp;
	int rc;

	req = mbox_alloc_msg_npa_lf_alloc(mbox);
	if (req == NULL)
		return -ENOSPC;
	req->aura_sz = lf->aura_sz;
	req->nr_pools = lf->nr_pools;

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return NPA_ERR_ALLOC;

	lf->stack_pg_ptrs = rsp->stack_pg_ptrs;
	lf->stack_pg_bytes = rsp->stack_pg_bytes;
	lf->qints = rsp->qints;

	return 0;
}

static int
npa_lf_free(struct mbox *mbox)
{
	mbox_alloc_msg_npa_lf_free(mbox);
	return mbox_process(mbox);
}

static inline uint32_t
aura_size_to_u32(uint8_t val)
{
	if (val == NPA_AURA_SZ_0)
		return 128;
	if (val >= NPA_AURA_SZ_MAX)
		return BIT_ULL(20);

	return 1 << (val + 6);
}

static inline void
pool_count_aura_sz_get(uint32_t *nr_pools, uint8_t *aura_sz)
{
	uint32_t val;

	val = roc_idev_npa_maxpools_get();
	if (val < aura_size_to_u32(NPA_AURA_SZ_128))
		val = 128;
	if (val > aura_size_to_u32(NPA_AURA_SZ_1M))
		val = BIT_ULL(20);

	roc_idev_npa_maxpools_set(val);
	*nr_pools = val;
	*aura_sz = plt_log2_u32(val) - 6;
}
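
/*
 * The aura size encoding is log2 based: size = 1 << (aura_sz + 6), with
 * NPA_AURA_SZ_0 special-cased to 128 in aura_size_to_u32(). Worked
 * example: a request for 1024 pools is clamped to [128, 1M], then
 * *aura_sz = plt_log2_u32(1024) - 6 = 4 and *nr_pools = 1024.
 */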

static int
npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
{
	uint32_t i, bmp_sz, nr_pools;
	uint8_t aura_sz;
	int rc;

	/* Sanity checks */
	if (!lf || !base || !mbox)
		return NPA_ERR_PARAM;

	if (base & ROC_AURA_ID_MASK)
		return NPA_ERR_BASE_INVALID;

	pool_count_aura_sz_get(&nr_pools, &aura_sz);
	if (aura_sz == NPA_AURA_SZ_0 || aura_sz >= NPA_AURA_SZ_MAX)
		return NPA_ERR_PARAM;

	memset(lf, 0x0, sizeof(*lf));
	lf->base = base;
	lf->aura_sz = aura_sz;
	lf->nr_pools = nr_pools;
	lf->mbox = mbox;

	rc = npa_lf_alloc(lf);
	if (rc)
		goto exit;

	bmp_sz = plt_bitmap_get_memory_footprint(nr_pools);

	/* Allocate memory for bitmap */
	lf->npa_bmp_mem = plt_zmalloc(bmp_sz, ROC_ALIGN);
	if (lf->npa_bmp_mem == NULL) {
		rc = NPA_ERR_ALLOC;
		goto lf_free;
	}

	/* Initialize pool resource bitmap array */
	lf->npa_bmp = plt_bitmap_init(nr_pools, lf->npa_bmp_mem, bmp_sz);
	if (lf->npa_bmp == NULL) {
		rc = NPA_ERR_PARAM;
		goto mem_free;
	}

	/* Mark all pools available */
	for (i = 0; i < nr_pools; i++)
		plt_bitmap_set(lf->npa_bmp, i);

	/* Allocate memory for qint context */
	lf->npa_qint_mem = plt_zmalloc(sizeof(struct npa_qint) * nr_pools, 0);
	if (lf->npa_qint_mem == NULL) {
		rc = -ENOMEM;
		goto bmap_free;
	}

	/* Allocate memory for npa_aura_lim memory */
	lf->aura_lim = plt_zmalloc(sizeof(struct npa_aura_lim) * nr_pools, 0);
	if (lf->aura_lim == NULL) {
		rc = -ENOMEM;
		goto qint_free;
	}

	/* Init aura start & end limits */
	for (i = 0; i < nr_pools; i++) {
		lf->aura_lim[i].ptr_start = UINT64_MAX;
		lf->aura_lim[i].ptr_end = 0x0ull;
	}

	return 0;

qint_free:
	plt_free(lf->npa_qint_mem);
bmap_free:
	plt_bitmap_free(lf->npa_bmp);
mem_free:
	plt_free(lf->npa_bmp_mem);
lf_free:
	npa_lf_free(lf->mbox);
exit:
	return rc;
}

static int
npa_dev_fini(struct npa_lf *lf)
{
	if (!lf)
		return NPA_ERR_PARAM;

	plt_free(lf->aura_lim);
	plt_free(lf->npa_qint_mem);
	plt_bitmap_free(lf->npa_bmp);
	plt_free(lf->npa_bmp_mem);

	return npa_lf_free(lf->mbox);
}

int
npa_lf_init(struct dev *dev, struct plt_pci_device *pci_dev)
{
	struct idev_cfg *idev;
	uint16_t npa_msixoff;
	struct npa_lf *lf;
	int rc;

	idev = idev_get_cfg();
	if (idev == NULL)
		return NPA_ERR_ALLOC;

	/* Not the first PCI device */
	if (__atomic_fetch_add(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST) != 0)
		return 0;

	rc = npa_attach(dev->mbox);
	if (rc)
		goto fail;

	rc = npa_get_msix_offset(dev->mbox, &npa_msixoff);
	if (rc)
		goto npa_detach;

	lf = &dev->npa;
	rc = npa_dev_init(lf, dev->bar2 + (RVU_BLOCK_ADDR_NPA << 20),
			  dev->mbox);
	if (rc)
		goto npa_detach;

	lf->pf_func = dev->pf_func;
	lf->npa_msixoff = npa_msixoff;
	lf->intr_handle = &pci_dev->intr_handle;
	lf->pci_dev = pci_dev;

	idev->npa_pf_func = dev->pf_func;
	idev->npa = lf;
	plt_wmb();

	rc = npa_register_irqs(lf);
	if (rc)
		goto npa_fini;

	plt_npa_dbg("npa=%p max_pools=%d pf_func=0x%x msix=0x%x", lf,
		    roc_idev_npa_maxpools_get(), lf->pf_func, npa_msixoff);

	return 0;

npa_fini:
	npa_dev_fini(idev->npa);
npa_detach:
	npa_detach(dev->mbox);
fail:
	__atomic_fetch_sub(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST);
	return rc;
}

int
npa_lf_fini(void)
{
	struct idev_cfg *idev;
	int rc = 0;

	idev = idev_get_cfg();
	if (idev == NULL)
		return NPA_ERR_ALLOC;

	/* Not the last PCI device */
	if (__atomic_sub_fetch(&idev->npa_refcnt, 1, __ATOMIC_SEQ_CST) != 0)
		return 0;

	npa_unregister_irqs(idev->npa);
	rc |= npa_dev_fini(idev->npa);
	rc |= npa_detach(idev->npa->mbox);
	idev_set_defaults(idev);

	return rc;
}

int
roc_npa_dev_init(struct roc_npa *roc_npa)
{
	struct plt_pci_device *pci_dev;
	struct npa *npa;
	struct dev *dev;
	int rc;

	if (roc_npa == NULL || roc_npa->pci_dev == NULL)
		return NPA_ERR_PARAM;

	PLT_STATIC_ASSERT(sizeof(struct npa) <= ROC_NPA_MEM_SZ);
	npa = roc_npa_to_npa_priv(roc_npa);
	memset(npa, 0, sizeof(*npa));
	pci_dev = roc_npa->pci_dev;
	dev = &npa->dev;

	/* Initialize device */
	rc = dev_init(dev, pci_dev);
	if (rc) {
		plt_err("Failed to init roc device");
		goto fail;
	}

	npa->pci_dev = pci_dev;
	dev->drv_inited = true;
fail:
	return rc;
}

int
roc_npa_dev_fini(struct roc_npa *roc_npa)
{
	struct npa *npa = roc_npa_to_npa_priv(roc_npa);

	if (npa == NULL)
		return NPA_ERR_PARAM;

	npa->dev.drv_inited = false;
	return dev_fini(&npa->dev, npa->pci_dev);
}
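
/*
 * Bring-up/teardown sketch from a PCI probe path (illustrative; the
 * enclosing driver object names are hypothetical):
 *
 *	struct roc_npa *roc_npa = &priv->npa;
 *	int rc;
 *
 *	roc_npa->pci_dev = pci_dev;
 *	rc = roc_npa_dev_init(roc_npa);
 *	...
 *	rc = roc_npa_dev_fini(roc_npa);
 */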