mbox_mem_unmap((void *)vf_mbase, MBOX_SIZE * pci_dev->max_vfs);
}
-static uint16_t
-dev_pf_total_vfs(struct plt_pci_device *pci_dev)
-{
- uint16_t total_vfs = 0;
- int sriov_pos, rc;
-
- sriov_pos =
- plt_pci_find_ext_capability(pci_dev, ROC_PCI_EXT_CAP_ID_SRIOV);
- if (sriov_pos <= 0) {
- plt_warn("Unable to find SRIOV cap, rc=%d", sriov_pos);
- return 0;
- }
-
- rc = plt_pci_read_config(pci_dev, &total_vfs, 2,
- sriov_pos + ROC_PCI_SRIOV_TOTAL_VF);
- if (rc < 0) {
- plt_warn("Unable to read SRIOV cap, rc=%d", rc);
- return 0;
- }
-
- return total_vfs;
-}
-
static int
-dev_setup_shared_lmt_region(struct mbox *mbox)
+dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova)
{
struct lmtst_tbl_setup_req *req;
req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
- req->pcifunc = idev_lmt_pffunc_get();
+ /* This pcifunc is defined with primary pcifunc whose LMT address
+ * will be shared. If call contains valid IOVA, following pcifunc
+ * field is of no use.
+ */
+ req->pcifunc = valid_iova ? 0 : idev_lmt_pffunc_get();
+ req->use_local_lmt_region = valid_iova;
+ req->lmt_iova = iova;
return mbox_process(mbox);
}
+/* Total no of lines * size of each lmtline */
+#define LMT_REGION_SIZE (ROC_NUM_LMT_LINES * ROC_LMT_LINE_SZ)
static int
-dev_lmt_setup(struct plt_pci_device *pci_dev, struct dev *dev)
+dev_lmt_setup(struct dev *dev)
{
+ char name[PLT_MEMZONE_NAMESIZE];
+ const struct plt_memzone *mz;
struct idev_cfg *idev;
int rc;
/* Set common lmt region from second pf_func onwards. */
if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() &&
dev->pf_func != idev_lmt_pffunc_get()) {
- rc = dev_setup_shared_lmt_region(dev->mbox);
+ rc = dev_setup_shared_lmt_region(dev->mbox, false, 0);
if (!rc) {
+ /* On success, updating lmt base of secondary pf_funcs
+ * with primary pf_func's lmt base.
+ */
dev->lmt_base = roc_idev_lmt_base_addr_get();
return rc;
}
		plt_err("Failed to setup shared lmt region, pf_func %d err %d",
			dev->pf_func, rc);
}
- if (dev_is_vf(dev)) {
- /* VF BAR4 should always be sufficient enough to
- * hold LMT lines.
- */
- if (pci_dev->mem_resource[4].len <
- (RVU_LMT_LINE_MAX * RVU_LMT_SZ)) {
- plt_err("Not enough bar4 space for lmt lines");
- return -EFAULT;
- }
+ /* Allocating memory for LMT region */
+ sprintf(name, "LMT_MAP%x", dev->pf_func);
- dev->lmt_base = dev->bar4;
- } else {
- uint64_t bar4_mbox_sz = MBOX_SIZE;
-
- /* PF BAR4 should always be sufficient enough to
- * hold PF-AF MBOX + PF-VF MBOX + LMT lines.
- */
- if (pci_dev->mem_resource[4].len <
- (bar4_mbox_sz + (RVU_LMT_LINE_MAX * RVU_LMT_SZ))) {
- plt_err("Not enough bar4 space for lmt lines and mbox");
- return -EFAULT;
- }
+ /* Setting alignment to ensure correct masking for resetting to lmt base
+ * of a core after all lmt lines under that core are used.
+ * Alignment value LMT_REGION_SIZE to handle the case where all lines
+ * are used by 1 core.
+ */
+ mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE,
+ LMT_REGION_SIZE);
+ if (!mz) {
+ plt_err("Memory alloc failed: %s", strerror(errno));
+ goto fail;
+ }
- /* LMT base is just after total VF MBOX area */
- bar4_mbox_sz += (MBOX_SIZE * dev_pf_total_vfs(pci_dev));
- dev->lmt_base = dev->bar4 + bar4_mbox_sz;
+ /* Share the IOVA address with Kernel */
+ rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);
+ if (rc) {
+ errno = rc;
+ goto free;
}
+ dev->lmt_base = mz->iova;
+ dev->lmt_mz = mz;
/* Base LMT address should be chosen from only those pci funcs which
* participate in LMT shared mode.
*/
}
return 0;
+free:
+ plt_memzone_free(mz);
+fail:
+ return -errno;
}
int
goto iounmap;
/* Setup LMT line base */
- rc = dev_lmt_setup(pci_dev, dev);
+ rc = dev_lmt_setup(dev);
if (rc)
goto iounmap;
/* Clear references to this pci dev */
npa_lf_fini();
+ /* Releasing memory allocated for lmt region */
+ if (dev->lmt_mz)
+ plt_memzone_free(dev->lmt_mz);
+
mbox_unregister_irq(pci_dev, dev);
if (!dev_is_vf(dev))