1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include <rte_atomic.h>
6 #include <rte_bus_pci.h>
7 #include <rte_common.h>
10 #include <rte_malloc.h>
11 #include <rte_mbuf_pool_ops.h>
14 #include "otx2_common.h"
16 #include "otx2_mempool.h"
/* Memzone name for the shared otx2_dev object: "otx2_npa_dev_" followed by
 * the PCI DBDF address string of the owning device (see otx2_npa_dev_to_name).
 */
18 #define OTX2_NPA_DEV_NAME RTE_STR(otx2_npa_dev_)
19 #define OTX2_NPA_DEV_NAME_LEN (sizeof(OTX2_NPA_DEV_NAME) + PCI_PRI_STR_SIZE)
/* Allocate an NPA LF from the AF over the mailbox.
 *
 * Sends an NPA_LF_ALLOC request carrying the requested aura size and pool
 * count (taken from @lf), then records the stack page geometry
 * (stack_pg_ptrs/stack_pg_bytes) and qint count from the AF response.
 *
 * Returns NPA_LF_ERR_ALLOC when the mailbox exchange fails.
 * NOTE(review): this listing is elided (original line numbers skip); the
 * opening brace, rc declaration, the rc check guarding the error return and
 * the final success return are not visible here — confirm against full source.
 */
22 npa_lf_alloc(struct otx2_npa_lf *lf)
24 	struct otx2_mbox *mbox = lf->mbox;
25 	struct npa_lf_alloc_req *req;
26 	struct npa_lf_alloc_rsp *rsp;
29 	req = otx2_mbox_alloc_msg_npa_lf_alloc(mbox);
30 	req->aura_sz = lf->aura_sz;
31 	req->nr_pools = lf->nr_pools;
33 	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
35 		return NPA_LF_ERR_ALLOC;
/* Cache LF geometry reported by the AF for later pool/stack sizing. */
37 	lf->stack_pg_ptrs = rsp->stack_pg_ptrs;
38 	lf->stack_pg_bytes = rsp->stack_pg_bytes;
39 	lf->qints = rsp->qints;
/* Release the NPA LF back to the AF: queue an NPA_LF_FREE mailbox message
 * and wait for completion. Returns the mailbox processing status. */
45 npa_lf_free(struct otx2_mbox *mbox)
47 	otx2_mbox_alloc_msg_npa_lf_free(mbox);
49 	return otx2_mbox_process(mbox);
/* Initialize an NPA LF: validate parameters, allocate the LF from the AF
 * over the mailbox, then set up the local bookkeeping structures.
 *
 * @lf:       LF state to populate (zeroed here before use)
 * @base:     LF register base; AURA_ID_MASK bits must be clear
 * @aura_sz:  aura size enum; NPA_AURA_SZ_0 and >= NPA_AURA_SZ_MAX rejected
 * @nr_pools: number of pools to manage; must be non-zero
 * @mbox:     AF mailbox used for the alloc request
 *
 * Allocates three resources in order: the pool-availability bitmap (all
 * pools marked free), the per-pool qint context array, and the per-aura
 * limit array (ptr_start/ptr_end initialized to an empty range).
 *
 * Returns 0 on success or a negative NPA_LF_ERR_* code.
 * NOTE(review): elided listing — the error-path goto labels and the checks
 * that route to the trailing cleanup calls are not visible here; confirm the
 * unwind order against the full source.
 */
53 npa_lf_init(struct otx2_npa_lf *lf, uintptr_t base, uint8_t aura_sz,
54 	    uint32_t nr_pools, struct otx2_mbox *mbox)
60 	if (!lf || !base || !mbox || !nr_pools)
61 		return NPA_LF_ERR_PARAM;
63 	if (base & AURA_ID_MASK)
64 		return NPA_LF_ERR_BASE_INVALID;
66 	if (aura_sz == NPA_AURA_SZ_0 || aura_sz >= NPA_AURA_SZ_MAX)
67 		return NPA_LF_ERR_PARAM;
69 	memset(lf, 0x0, sizeof(*lf));
71 	lf->aura_sz = aura_sz;
72 	lf->nr_pools = nr_pools;
/* Ask the AF for the LF; fills in stack page geometry and qint count. */
75 	rc = npa_lf_alloc(lf);
79 	bmp_sz = rte_bitmap_get_memory_footprint(nr_pools);
81 	/* Allocate memory for bitmap */
82 	lf->npa_bmp_mem = rte_zmalloc("npa_bmp_mem", bmp_sz,
84 	if (lf->npa_bmp_mem == NULL) {
89 	/* Initialize pool resource bitmap array */
90 	lf->npa_bmp = rte_bitmap_init(nr_pools, lf->npa_bmp_mem, bmp_sz);
91 	if (lf->npa_bmp == NULL) {
96 	/* Mark all pools available */
97 	for (i = 0; i < nr_pools; i++)
98 		rte_bitmap_set(lf->npa_bmp, i);
100 	/* Allocate memory for qint context */
101 	lf->npa_qint_mem = rte_zmalloc("npa_qint_mem",
102 			sizeof(struct otx2_npa_qint) * nr_pools, 0);
103 	if (lf->npa_qint_mem == NULL) {
108 	/* Allocate memory for nap_aura_lim memory */
109 	lf->aura_lim = rte_zmalloc("npa_aura_lim_mem",
110 			sizeof(struct npa_aura_lim) * nr_pools, 0);
111 	if (lf->aura_lim == NULL) {
116 	/* Init aura start & end limits */
117 	for (i = 0; i < nr_pools; i++) {
118 		lf->aura_lim[i].ptr_start = UINT64_MAX;
119 		lf->aura_lim[i].ptr_end = 0x0ull;
/* Error unwind: release resources in reverse order of acquisition. */
125 	rte_free(lf->npa_qint_mem);
127 	rte_bitmap_free(lf->npa_bmp);
129 	rte_free(lf->npa_bmp_mem);
131 	npa_lf_free(lf->mbox);
/* Tear down an NPA LF initialized by npa_lf_init(): free the aura limit
 * array, qint context, pool bitmap and its backing memory, then return the
 * LF to the AF via npa_lf_free() (its status is the return value).
 * NOTE(review): elided listing — the NULL-lf guard preceding the
 * NPA_LF_ERR_PARAM return is not visible here. */
137 npa_lf_fini(struct otx2_npa_lf *lf)
140 		return NPA_LF_ERR_PARAM;
142 	rte_free(lf->aura_lim);
143 	rte_free(lf->npa_qint_mem);
144 	rte_bitmap_free(lf->npa_bmp);
145 	rte_free(lf->npa_bmp_mem);
147 	return npa_lf_free(lf->mbox);
/* Convert an NPA_AURA_SZ_* enum value into the pool count it encodes;
 * valid sizes map to 1 << (val + 6) entries (e.g. SZ_128 -> 128).
 * NOTE(review): elided listing — the return statements for the two guard
 * branches (SZ_0 and >= SZ_MAX) are not visible here; confirm their values
 * against the full source. */
151 static inline uint32_t
152 otx2_aura_size_to_u32(uint8_t val)
154 	if (val == NPA_AURA_SZ_0)
156 	if (val >= NPA_AURA_SZ_MAX)
159 	return 1 << (val + 6);
/* Ask the AF to attach NPA resources to this PF/VF function.
 * NOTE(review): elided listing — the request-field setup between allocating
 * the message and processing the mailbox (presumably req->npalf = true) is
 * not visible here; confirm against the full source. */
163 npa_lf_attach(struct otx2_mbox *mbox)
165 	struct rsrc_attach_req *req;
167 	req = otx2_mbox_alloc_msg_attach_resources(mbox);
170 	return otx2_mbox_process(mbox);
/* Ask the AF to detach NPA resources from this PF/VF function.
 * NOTE(review): elided listing — the request-field setup (presumably
 * req->npalf = true) is not visible here; confirm against the full source. */
174 npa_lf_detach(struct otx2_mbox *mbox)
176 	struct rsrc_detach_req *req;
178 	req = otx2_mbox_alloc_msg_detach_resources(mbox);
181 	return otx2_mbox_process(mbox);
/* Query the AF for MSIX vector offsets and store the NPA offset into
 * *npa_msixoff for later IRQ registration.
 * NOTE(review): elided listing — the rc error check between the mailbox
 * call and the dereference of msix_rsp is not visible here. */
185 npa_lf_get_msix_offset(struct otx2_mbox *mbox, uint16_t *npa_msixoff)
187 	struct msix_offset_rsp *msix_rsp;
190 	/* Get NPA and NIX MSIX vector offsets */
191 	otx2_mbox_alloc_msg_msix_offset(mbox);
193 	rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
195 	*npa_msixoff = msix_rsp->npa_msixoff;
/* Release the process-wide shared NPA LF held in the intra-device config.
 * Drops the reference count; the last user (count reaches zero) unregisters
 * IRQs, finalizes the LF, detaches it from the AF and restores the idev
 * defaults. rc accumulates the fini/detach statuses via |=.
 * NOTE(review): elided listing — the NULL-idev guard and the final return
 * are not visible here. */
205 otx2_npa_lf_fini(void)
207 	struct otx2_idev_cfg *idev;
210 	idev = otx2_intra_dev_get_cfg();
214 	if (rte_atomic16_add_return(&idev->npa_refcnt, -1) == 0) {
215 		otx2_npa_unregister_irqs(idev->npa_lf);
216 		rc |= npa_lf_fini(idev->npa_lf);
217 		rc |= npa_lf_detach(idev->npa_lf->mbox);
218 		otx2_npa_set_defaults(idev);
/* Acquire (or reuse) the process-wide shared NPA LF for @pci_dev.
 *
 * The first caller (refcount transitions 0 -> 1) performs the full bring-up:
 * attach the NPA LF via the AF mailbox, query its MSIX offset, initialize it
 * with a 128-entry aura size, record PCI/interrupt handles, register IRQs and
 * install "octeontx2_npa" as the platform mempool ops. Subsequent callers
 * just take a reference.
 *
 * On failure the error path unwinds (npa_lf_fini / npa_lf_detach) and drops
 * the reference count it took.
 *
 * NOTE(review): elided listing — the assignment that points @lf at its
 * storage, the per-step error checks and the goto labels between the main
 * path and the trailing cleanup calls are not visible here; confirm against
 * the full source.
 */
229 otx2_npa_lf_init(struct rte_pci_device *pci_dev, void *otx2_dev)
231 	struct otx2_dev *dev = otx2_dev;
232 	struct otx2_idev_cfg *idev;
233 	struct otx2_npa_lf *lf;
234 	uint16_t npa_msixoff;
239 	idev = otx2_intra_dev_get_cfg();
243 	/* Is NPA LF initialized by any another driver? */
244 	if (rte_atomic16_add_return(&idev->npa_refcnt, 1) == 1) {
246 		rc = npa_lf_attach(dev->mbox);
250 		rc = npa_lf_get_msix_offset(dev->mbox, &npa_msixoff);
254 		aura_sz = NPA_AURA_SZ_128;
255 		nr_pools = otx2_aura_size_to_u32(aura_sz);
/* LF base lives in BAR2 at the NPA RVU block offset. */
258 		rc = npa_lf_init(lf, dev->bar2 + (RVU_BLOCK_ADDR_NPA << 20),
259 				 aura_sz, nr_pools, dev->mbox);
264 		lf->pf_func = dev->pf_func;
265 		lf->npa_msixoff = npa_msixoff;
266 		lf->intr_handle = &pci_dev->intr_handle;
267 		lf->pci_dev = pci_dev;
269 		idev->npa_pf_func = dev->pf_func;
272 		rc = otx2_npa_register_irqs(lf);
276 		rte_mbuf_set_platform_mempool_ops("octeontx2_npa");
277 		otx2_npa_dbg("npa_lf=%p pools=%d sz=%d pf_func=0x%x msix=0x%x",
278 			     lf, nr_pools, aura_sz, lf->pf_func, npa_msixoff);
/* Error unwind for the first-caller bring-up path. */
284 	npa_lf_fini(idev->npa_lf);
286 	npa_lf_detach(dev->mbox);
288 	rte_atomic16_dec(&idev->npa_refcnt);
/* Format the per-device memzone name "otx2_npa_dev_<domain:bus:devid.fn>"
 * into @name; caller must provide at least OTX2_NPA_DEV_NAME_LEN bytes.
 * NOTE(review): elided listing — the return statement (presumably returning
 * @name for direct use as a memzone key) is not visible here. */
293 otx2_npa_dev_to_name(struct rte_pci_device *pci_dev, char *name)
295 	snprintf(name, OTX2_NPA_DEV_NAME_LEN,
296 		 OTX2_NPA_DEV_NAME PCI_PRI_FMT,
297 		 pci_dev->addr.domain, pci_dev->addr.bus,
298 		 pci_dev->addr.devid, pci_dev->addr.function);
/* Probe-time initialization of an NPA PCI device.
 *
 * Reserves a memzone (named after the PCI address) to hold the shared
 * otx2_dev object, initializes the base device, then grabs the shared NPA
 * LF and marks the driver initialized. On failure it tears down in reverse
 * order (dev fini, memzone free) and logs the error code.
 *
 * NOTE(review): elided listing — the memzone NULL check, the dev = mz->addr
 * assignment, the rc checks and the goto labels routing to the trailing
 * cleanup are not visible here. */
304 otx2_npa_init(struct rte_pci_device *pci_dev)
306 	char name[OTX2_NPA_DEV_NAME_LEN];
307 	const struct rte_memzone *mz;
308 	struct otx2_dev *dev;
311 	mz = rte_memzone_reserve_aligned(otx2_npa_dev_to_name(pci_dev, name),
312 					 sizeof(*dev), SOCKET_ID_ANY,
319 	/* Initialize the base otx2_dev object */
320 	rc = otx2_dev_init(pci_dev, dev);
324 	/* Grab the NPA LF if required */
325 	rc = otx2_npa_lf_init(pci_dev, dev);
329 	dev->drv_inited = true;
/* Error unwind: undo base-device init and release the shared memzone. */
334 	otx2_dev_fini(pci_dev, dev);
336 	rte_memzone_free(mz);
338 	otx2_err("Failed to initialize npa device rc=%d", rc);
/* Remove-time teardown of an NPA PCI device.
 *
 * Looks up the shared memzone by the device's name, clears drv_inited,
 * and — unless the common NPA resource is still held by other devices
 * (otx2_npa_lf_active) — finalizes the base device and frees the memzone.
 *
 * NOTE(review): elided listing — the memzone NULL check, the dev = mz->addr
 * assignment, the LF release call and the early return inside the
 * "resource in use" branch are not visible here. */
343 otx2_npa_fini(struct rte_pci_device *pci_dev)
345 	char name[OTX2_NPA_DEV_NAME_LEN];
346 	const struct rte_memzone *mz;
347 	struct otx2_dev *dev;
349 	mz = rte_memzone_lookup(otx2_npa_dev_to_name(pci_dev, name));
354 	if (!dev->drv_inited)
357 	dev->drv_inited = false;
361 	if (otx2_npa_lf_active(dev)) {
362 		otx2_info("%s: common resource in use by other devices",
367 	otx2_dev_fini(pci_dev, dev);
368 	rte_memzone_free(mz);
/* PCI remove callback: teardown runs only in the primary process
 * (secondaries share the primary's state and must not free it). */
374 npa_remove(struct rte_pci_device *pci_dev)
376 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
379 	return otx2_npa_fini(pci_dev);
/* PCI probe callback: device bring-up runs only in the primary process. */
383 npa_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
385 	RTE_SET_USED(pci_drv);
387 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
390 	return otx2_npa_init(pci_dev);
/* PCI IDs served by this driver: OCTEON TX2 RVU NPA PF and VF devices. */
393 static const struct rte_pci_id pci_npa_map[] = {
395 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
396 			       PCI_DEVID_OCTEONTX2_RVU_NPA_PF)
399 		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
400 			       PCI_DEVID_OCTEONTX2_RVU_NPA_VF)
/* Driver object: needs BAR mapping and IOVA-as-VA addressing.
 * NOTE(review): elided listing — the .probe = npa_probe member is not
 * visible here; confirm against the full source. */
407 static struct rte_pci_driver pci_npa = {
408 	.id_table = pci_npa_map,
409 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
411 	.remove = npa_remove,
/* Register the PMD with EAL, export its PCI table and kmod dependency. */
414 RTE_PMD_REGISTER_PCI(mempool_octeontx2, pci_npa);
415 RTE_PMD_REGISTER_PCI_TABLE(mempool_octeontx2, pci_npa_map);
416 RTE_PMD_REGISTER_KMOD_DEP(mempool_octeontx2, "vfio-pci");