* Copyright(C) 2019 Marvell International Ltd.
*/
+#include <rte_atomic.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
#include <rte_pci.h>
#include "otx2_common.h"
+#include "otx2_dev.h"
+#include "otx2_mempool.h"
+
+#define OTX2_NPA_DEV_NAME RTE_STR(otx2_npa_dev_)
+#define OTX2_NPA_DEV_NAME_LEN (sizeof(OTX2_NPA_DEV_NAME) + PCI_PRI_STR_SIZE)
+
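+/* Ask the admin function (AF), over the shared mailbox, to allocate an
+ * NPA LF with the requested aura size and pool count; the response
+ * supplies the stack page geometry and the number of queue interrupts.
+ */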
+static inline int
+npa_lf_alloc(struct otx2_npa_lf *lf)
+{
+ struct otx2_mbox *mbox = lf->mbox;
+ struct npa_lf_alloc_req *req;
+ struct npa_lf_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npa_lf_alloc(mbox);
+ req->aura_sz = lf->aura_sz;
+ req->nr_pools = lf->nr_pools;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return NPA_LF_ERR_ALLOC;
+
+ lf->stack_pg_ptrs = rsp->stack_pg_ptrs;
+ lf->stack_pg_bytes = rsp->stack_pg_bytes;
+ lf->qints = rsp->qints;
+
+ return 0;
+}
+
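+/* Release the NPA LF via a mailbox request to the AF */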
+static int
+npa_lf_free(struct otx2_mbox *mbox)
+{
+ otx2_mbox_alloc_msg_npa_lf_free(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
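+/* Validate the LF parameters, allocate the LF through the mailbox and
+ * set up the pool resource bitmap plus the qint context memory.
+ */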
+static int
+npa_lf_init(struct otx2_npa_lf *lf, uintptr_t base, uint8_t aura_sz,
+ uint32_t nr_pools, struct otx2_mbox *mbox)
+{
+ uint32_t i, bmp_sz;
+ int rc;
+
+ /* Sanity checks */
+ if (!lf || !base || !mbox || !nr_pools)
+ return NPA_LF_ERR_PARAM;
+
+ if (base & AURA_ID_MASK)
+ return NPA_LF_ERR_BASE_INVALID;
+
+ if (aura_sz == NPA_AURA_SZ_0 || aura_sz >= NPA_AURA_SZ_MAX)
+ return NPA_LF_ERR_PARAM;
+
+ memset(lf, 0x0, sizeof(*lf));
+ lf->base = base;
+ lf->aura_sz = aura_sz;
+ lf->nr_pools = nr_pools;
+ lf->mbox = mbox;
+
+ rc = npa_lf_alloc(lf);
+ if (rc)
+ goto exit;
+
+ bmp_sz = rte_bitmap_get_memory_footprint(nr_pools);
+
+ /* Allocate memory for bitmap */
+ lf->npa_bmp_mem = rte_zmalloc("npa_bmp_mem", bmp_sz,
+ RTE_CACHE_LINE_SIZE);
+ if (lf->npa_bmp_mem == NULL) {
+ rc = -ENOMEM;
+ goto lf_free;
+ }
+
+ /* Initialize pool resource bitmap array */
+ lf->npa_bmp = rte_bitmap_init(nr_pools, lf->npa_bmp_mem, bmp_sz);
+ if (lf->npa_bmp == NULL) {
+ rc = -EINVAL;
+ goto bmap_mem_free;
+ }
+
+ /* Mark all pools available */
+ for (i = 0; i < nr_pools; i++)
+ rte_bitmap_set(lf->npa_bmp, i);
+
+ /* Allocate memory for qint context */
+ lf->npa_qint_mem = rte_zmalloc("npa_qint_mem",
+ sizeof(struct otx2_npa_qint) * nr_pools, 0);
+ if (lf->npa_qint_mem == NULL) {
+ rc = -ENOMEM;
+ goto bmap_free;
+ }
+
+ return 0;
+
+bmap_free:
+ rte_bitmap_free(lf->npa_bmp);
+bmap_mem_free:
+ rte_free(lf->npa_bmp_mem);
+lf_free:
+ npa_lf_free(lf->mbox);
+exit:
+ return rc;
+}
+
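+/* Free the LF backing memory and hand the LF back to the AF */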
+static int
+npa_lf_fini(struct otx2_npa_lf *lf)
+{
+ if (!lf)
+ return NPA_LF_ERR_PARAM;
+
+ rte_free(lf->npa_qint_mem);
+ rte_bitmap_free(lf->npa_bmp);
+ rte_free(lf->npa_bmp_mem);
+
+ return npa_lf_free(lf->mbox);
+}
+
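+/* Translate the NPA_AURA_SZ_* encoding into a pool count:
+ * NPA_AURA_SZ_0 maps to 128, any other value to 2^(val + 6).
+ */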
+static inline uint32_t
+otx2_aura_size_to_u32(uint8_t val)
+{
+ if (val == NPA_AURA_SZ_0)
+ return 128;
+ if (val >= NPA_AURA_SZ_MAX)
+ return BIT_ULL(20);
+
+ return 1 << (val + 6);
+}
+
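+/* Attach an NPA LF to this PF/VF */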
+static inline int
+npa_lf_attach(struct otx2_mbox *mbox)
+{
+ struct rsrc_attach_req *req;
+
+ req = otx2_mbox_alloc_msg_attach_resources(mbox);
+ req->npalf = true;
+
+ return otx2_mbox_process(mbox);
+}
+
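+/* Detach the NPA LF from this PF/VF */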
+static inline int
+npa_lf_detach(struct otx2_mbox *mbox)
+{
+ struct rsrc_detach_req *req;
+
+ req = otx2_mbox_alloc_msg_detach_resources(mbox);
+ req->npalf = true;
+
+ return otx2_mbox_process(mbox);
+}
+
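+/* Read back the MSIX vector offset assigned to the NPA LF */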
+static inline int
+npa_lf_get_msix_offset(struct otx2_mbox *mbox, uint16_t *npa_msixoff)
+{
+ struct msix_offset_rsp *msix_rsp;
+ int rc;
+
+ /* Query the MSIX vector offsets; only the NPA offset is consumed */
+ otx2_mbox_alloc_msg_msix_offset(mbox);
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
+ if (rc)
+ return rc;
+
+ *npa_msixoff = msix_rsp->npa_msixoff;
+
+ return 0;
+}
+
+/**
+ * @internal
+ * Finalize NPA LF. Drops a reference on the shared LF and releases
+ * its resources once the last user is gone.
+ */
+int
+otx2_npa_lf_fini(void)
+{
+ struct otx2_idev_cfg *idev;
+ int rc = 0;
+
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return -ENOMEM;
+
+ if (rte_atomic16_add_return(&idev->npa_refcnt, -1) == 0) {
+ rc |= npa_lf_fini(idev->npa_lf);
+ rc |= npa_lf_detach(idev->npa_lf->mbox);
+ otx2_npa_set_defaults(idev);
+ }
+
+ return rc;
+}
+
+/**
+ * @internal
+ * Initialize NPA LF. The LF is shared across otx2 drivers; only the
+ * first caller performs the attach and hardware setup.
+ */
+int
+otx2_npa_lf_init(struct rte_pci_device *pci_dev, void *otx2_dev)
+{
+ struct otx2_dev *dev = otx2_dev;
+ struct otx2_idev_cfg *idev;
+ struct otx2_npa_lf *lf;
+ uint16_t npa_msixoff;
+ uint32_t nr_pools;
+ uint8_t aura_sz;
+ int rc;
+
+ idev = otx2_intra_dev_get_cfg();
+ if (idev == NULL)
+ return -ENOMEM;
+
+ /* Initialize the NPA LF on the first reference only; a nonzero
+  * refcount means another driver has already set it up.
+  */
+ if (rte_atomic16_add_return(&idev->npa_refcnt, 1) == 1) {
+ rc = npa_lf_attach(dev->mbox);
+ if (rc)
+ goto fail;
+
+ rc = npa_lf_get_msix_offset(dev->mbox, &npa_msixoff);
+ if (rc)
+ goto npa_detach;
+
+ aura_sz = NPA_AURA_SZ_128;
+ nr_pools = otx2_aura_size_to_u32(aura_sz);
+
+ lf = &dev->npalf;
+ rc = npa_lf_init(lf, dev->bar2 + (RVU_BLOCK_ADDR_NPA << 20),
+ aura_sz, nr_pools, dev->mbox);
+
+ if (rc)
+ goto npa_detach;
+
+ lf->pf_func = dev->pf_func;
+ lf->npa_msixoff = npa_msixoff;
+ lf->intr_handle = &pci_dev->intr_handle;
+ lf->pci_dev = pci_dev;
+
+ idev->npa_pf_func = dev->pf_func;
+ idev->npa_lf = lf;
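+ /* Make the shared LF state globally visible before continuing */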
+ rte_smp_wmb();
+
+ rte_mbuf_set_platform_mempool_ops("octeontx2_npa");
+ otx2_npa_dbg("npa_lf=%p pools=%d sz=%d pf_func=0x%x msix=0x%x",
+ lf, nr_pools, aura_sz, lf->pf_func, npa_msixoff);
+ }
+
+ return 0;
+
+npa_detach:
+ npa_lf_detach(dev->mbox);
+fail:
+ rte_atomic16_dec(&idev->npa_refcnt);
+ return rc;
+}
+
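+/* Build a unique memzone name from the NPA device's PCI address */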
+static inline char *
+otx2_npa_dev_to_name(struct rte_pci_device *pci_dev, char *name)
+{
+ snprintf(name, OTX2_NPA_DEV_NAME_LEN,
+ OTX2_NPA_DEV_NAME PCI_PRI_FMT,
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+
+ return name;
+}
+
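+/* Reserve a memzone for the otx2_dev object, initialize the base
+ * device and take a reference on the shared NPA LF.
+ */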
+static int
+otx2_npa_init(struct rte_pci_device *pci_dev)
+{
+ char name[OTX2_NPA_DEV_NAME_LEN];
+ const struct rte_memzone *mz;
+ struct otx2_dev *dev;
+ int rc = -ENOMEM;
+
+ mz = rte_memzone_reserve_aligned(otx2_npa_dev_to_name(pci_dev, name),
+ sizeof(*dev), SOCKET_ID_ANY,
+ 0, OTX2_ALIGN);
+ if (mz == NULL)
+ goto error;
+
+ dev = mz->addr;
+
+ /* Initialize the base otx2_dev object */
+ rc = otx2_dev_init(pci_dev, dev);
+ if (rc)
+ goto malloc_fail;
+
+ /* Grab the NPA LF if required */
+ rc = otx2_npa_lf_init(pci_dev, dev);
+ if (rc)
+ goto dev_uninit;
+
+ dev->drv_inited = true;
+ return 0;
+
+dev_uninit:
+ /* otx2_npa_lf_init() already dropped its LF reference on failure */
+ otx2_dev_fini(pci_dev, dev);
+malloc_fail:
+ rte_memzone_free(mz);
+error:
+ otx2_err("Failed to initialize npa device rc=%d", rc);
+ return rc;
+}
+
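+/* Drop the NPA LF reference and tear down the base device; defer with
+ * -EAGAIN while other devices still use the common resource.
+ */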
+static int
+otx2_npa_fini(struct rte_pci_device *pci_dev)
+{
+ char name[OTX2_NPA_DEV_NAME_LEN];
+ const struct rte_memzone *mz;
+ struct otx2_dev *dev;
+
+ mz = rte_memzone_lookup(otx2_npa_dev_to_name(pci_dev, name));
+ if (mz == NULL)
+ return -EINVAL;
+
+ dev = mz->addr;
+ if (!dev->drv_inited)
+ goto dev_fini;
+
+ dev->drv_inited = false;
+ otx2_npa_lf_fini();
+
+dev_fini:
+ if (otx2_npa_lf_active(dev)) {
+ otx2_info("%s: common resource in use by other devices",
+ pci_dev->name);
+ return -EAGAIN;
+ }
+
+ otx2_dev_fini(pci_dev, dev);
+ rte_memzone_free(mz);
+
+ return 0;
+}
static int
npa_remove(struct rte_pci_device *pci_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

-	RTE_SET_USED(pci_dev);
-	return 0;
+	return otx2_npa_fini(pci_dev);
}
static int
npa_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

-	RTE_SET_USED(pci_dev);
-	return 0;
+	return otx2_npa_init(pci_dev);
}
static const struct rte_pci_id pci_npa_map[] = {
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_MEMPOOL_H__
+#define __OTX2_MEMPOOL_H__
+
+#include <rte_bitmap.h>
+#include <rte_bus_pci.h>
+#include <rte_devargs.h>
+#include <rte_mempool.h>
+
+#include "otx2_common.h"
+#include "otx2_mbox.h"
+
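+/* NPA LF error codes, kept outside the negated-errno range */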
+enum npa_lf_status {
+ NPA_LF_ERR_PARAM = -512,
+ NPA_LF_ERR_ALLOC = -513,
+ NPA_LF_ERR_INVALID_BLOCK_SZ = -514,
+ NPA_LF_ERR_AURA_ID_ALLOC = -515,
+ NPA_LF_ERR_AURA_POOL_INIT = -516,
+ NPA_LF_ERR_AURA_POOL_FINI = -517,
+ NPA_LF_ERR_BASE_INVALID = -518,
+};
+
+struct otx2_npa_lf;
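+/* Queue interrupt context: back-reference to the owning LF plus the
+ * queue interrupt index.
+ */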
+struct otx2_npa_qint {
+ struct otx2_npa_lf *lf;
+ uint8_t qintx;
+};
+
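+/* NPA LF state shared by all otx2 drivers within the process */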
+struct otx2_npa_lf {
+ uint16_t qints;
+ uintptr_t base;
+ uint8_t aura_sz;
+ uint16_t pf_func;
+ uint32_t nr_pools;
+ void *npa_bmp_mem;
+ void *npa_qint_mem;
+ uint16_t npa_msixoff;
+ struct otx2_mbox *mbox;
+ uint32_t stack_pg_ptrs;
+ uint32_t stack_pg_bytes;
+ struct rte_bitmap *npa_bmp;
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+};
+
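+/* Low bits of an NPA address select the aura ID; a valid LF base
+ * must have these bits clear.
+ */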
+#define AURA_ID_MASK (BIT_ULL(16) - 1)
+
+/* NPA LF */
+int otx2_npa_lf_init(struct rte_pci_device *pci_dev, void *otx2_dev);
+int otx2_npa_lf_fini(void);
+
+#endif /* __OTX2_MEMPOOL_H__ */