diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 4cd5978..4e20437 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -163,6 +163,39 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
                rsp->rc = msg->rc;
                rsp->pcifunc = msg->pcifunc;
 
+               /* Whenever a PF comes up, the AF sends it the link status,
+                * but no such event is sent to a VF when it comes up. Use
+                * the MBOX_MSG_NIX_LF_START_RX response from the AF as the
+                * trigger to forward the PF's link status to the VF.
+                */
+               if (msg->id == MBOX_MSG_NIX_LF_START_RX) {
+                       /* Send link status to VF */
+                       struct cgx_link_user_info linfo;
+                       struct mbox_msghdr *vf_msg;
+                       size_t sz;
+
+                       /* Get the link status */
+                       memset(&linfo, 0, sizeof(struct cgx_link_user_info));
+                       if (dev->ops && dev->ops->link_status_get)
+                               dev->ops->link_status_get(dev->roc_nix, &linfo);
+
+                       sz = PLT_ALIGN(mbox_id2size(MBOX_MSG_CGX_LINK_EVENT),
+                                      MBOX_MSG_ALIGN);
+                       /* Prepare the message to be sent */
+                       vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz);
+                       if (vf_msg) {
+                               mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
+                               memcpy((uint8_t *)vf_msg +
+                                      sizeof(struct mbox_msghdr), &linfo,
+                                      sizeof(struct cgx_link_user_info));
+
+                               vf_msg->rc = msg->rc;
+                               vf_msg->pcifunc = msg->pcifunc;
+                               /* Send to VF */
+                               mbox_msg_send(&dev->mbox_vfpf_up, vf);
+                       }
+               }
+
                offset = mbox->rx_start + msg->next_msgoff;
        }
        plt_spinlock_unlock(&mdev->mbox_lock);
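
Note on the hunk above, illustrative only and not part of the patch: the PF copies
struct cgx_link_user_info immediately after the fixed struct mbox_msghdr when it
builds the UP message for the VF. The sketch below shows how a receiver could
recover that payload from the same layout; parse_link_event() is a hypothetical
helper, and it assumes the cnxk mbox and platform definitions (mbox_msghdr,
cgx_link_user_info, plt_info) are in scope.

static void
parse_link_event(const struct mbox_msghdr *hdr)
{
        const struct cgx_link_user_info *linfo;

        if (hdr->id != MBOX_MSG_CGX_LINK_EVENT)
                return;

        /* Payload sits right after the fixed message header, mirroring
         * the memcpy() offset used when the PF built the message.
         */
        linfo = (const struct cgx_link_user_info *)
                ((const uint8_t *)hdr + sizeof(struct mbox_msghdr));

        plt_info("link %s, speed %u", linfo->link_up ? "up" : "down",
                 (unsigned int)linfo->speed);
}
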
@@ -870,6 +903,8 @@ dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev)
                break;
        case PCI_DEVID_CNXK_RVU_SSO_TIM_VF:
        case PCI_DEVID_CNXK_RVU_NPA_VF:
+       case PCI_DEVID_CN10K_RVU_CPT_VF:
+       case PCI_DEVID_CN9K_RVU_CPT_VF:
        case PCI_DEVID_CNXK_RVU_AF_VF:
        case PCI_DEVID_CNXK_RVU_VF:
        case PCI_DEVID_CNXK_RVU_SDP_VF:
@@ -915,43 +950,30 @@ dev_vf_mbase_put(struct plt_pci_device *pci_dev, uintptr_t vf_mbase)
        mbox_mem_unmap((void *)vf_mbase, MBOX_SIZE * pci_dev->max_vfs);
 }
 
-static uint16_t
-dev_pf_total_vfs(struct plt_pci_device *pci_dev)
-{
-       uint16_t total_vfs = 0;
-       int sriov_pos, rc;
-
-       sriov_pos =
-               plt_pci_find_ext_capability(pci_dev, ROC_PCI_EXT_CAP_ID_SRIOV);
-       if (sriov_pos <= 0) {
-               plt_warn("Unable to find SRIOV cap, rc=%d", sriov_pos);
-               return 0;
-       }
-
-       rc = plt_pci_read_config(pci_dev, &total_vfs, 2,
-                                sriov_pos + ROC_PCI_SRIOV_TOTAL_VF);
-       if (rc < 0) {
-               plt_warn("Unable to read SRIOV cap, rc=%d", rc);
-               return 0;
-       }
-
-       return total_vfs;
-}
-
 static int
-dev_setup_shared_lmt_region(struct mbox *mbox)
+dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova)
 {
        struct lmtst_tbl_setup_req *req;
 
        req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
-       req->pcifunc = idev_lmt_pffunc_get();
+       /* pcifunc is set to the primary pcifunc whose LMT address will be
+        * shared. If the call carries a valid IOVA, the pcifunc field is
+        * unused.
+        */
+       req->pcifunc = valid_iova ? 0 : idev_lmt_pffunc_get();
+       req->use_local_lmt_region = valid_iova;
+       req->lmt_iova = iova;
 
        return mbox_process(mbox);
 }
 
+/* Total number of LMT lines * size of each LMT line */
+#define LMT_REGION_SIZE (ROC_NUM_LMT_LINES * ROC_LMT_LINE_SZ)
 static int
-dev_lmt_setup(struct plt_pci_device *pci_dev, struct dev *dev)
+dev_lmt_setup(struct dev *dev)
 {
+       char name[PLT_MEMZONE_NAMESIZE];
+       const struct plt_memzone *mz;
        struct idev_cfg *idev;
        int rc;
 
@@ -965,8 +987,11 @@ dev_lmt_setup(struct plt_pci_device *pci_dev, struct dev *dev)
        /* Set common lmt region from second pf_func onwards. */
        if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() &&
            dev->pf_func != idev_lmt_pffunc_get()) {
-               rc = dev_setup_shared_lmt_region(dev->mbox);
+               rc = dev_setup_shared_lmt_region(dev->mbox, false, 0);
                if (!rc) {
+                       /* On success, update the LMT base of this secondary
+                        * pf_func with the primary pf_func's LMT base.
+                        */
                        dev->lmt_base = roc_idev_lmt_base_addr_get();
                        return rc;
                }
@@ -975,34 +1000,30 @@ dev_lmt_setup(struct plt_pci_device *pci_dev, struct dev *dev)
                        dev->pf_func, rc);
        }
 
-       if (dev_is_vf(dev)) {
-               /* VF BAR4 should always be sufficient enough to
-                * hold LMT lines.
-                */
-               if (pci_dev->mem_resource[4].len <
-                   (RVU_LMT_LINE_MAX * RVU_LMT_SZ)) {
-                       plt_err("Not enough bar4 space for lmt lines");
-                       return -EFAULT;
-               }
-
-               dev->lmt_base = dev->bar4;
-       } else {
-               uint64_t bar4_mbox_sz = MBOX_SIZE;
+       /* Allocate memory for the LMT region */
+       sprintf(name, "LMT_MAP%x", dev->pf_func);
 
-               /* PF BAR4 should always be sufficient enough to
-                * hold PF-AF MBOX + PF-VF MBOX + LMT lines.
-                */
-               if (pci_dev->mem_resource[4].len <
-                   (bar4_mbox_sz + (RVU_LMT_LINE_MAX * RVU_LMT_SZ))) {
-                       plt_err("Not enough bar4 space for lmt lines and mbox");
-                       return -EFAULT;
-               }
+       /* Align the region so that masking back to a core's LMT base works
+        * correctly once all LMT lines of that core are used. An alignment
+        * of LMT_REGION_SIZE covers the case where all lines are used by a
+        * single core.
+        */
+       mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE,
+                                           LMT_REGION_SIZE);
+       if (!mz) {
+               plt_err("Memory alloc failed: %s", strerror(errno));
+               goto fail;
+       }
 
-               /* LMT base is just after total VF MBOX area */
-               bar4_mbox_sz += (MBOX_SIZE * dev_pf_total_vfs(pci_dev));
-               dev->lmt_base = dev->bar4 + bar4_mbox_sz;
+       /* Share the IOVA address with the kernel */
+       rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);
+       if (rc) {
+               errno = rc;
+               goto free;
        }
 
+       dev->lmt_base = mz->iova;
+       dev->lmt_mz = mz;
        /* Base LMT address should be chosen from only those pci funcs which
         * participate in LMT shared mode.
         */
@@ -1016,6 +1037,10 @@ dev_lmt_setup(struct plt_pci_device *pci_dev, struct dev *dev)
        }
 
        return 0;
+free:
+       plt_memzone_free(mz);
+fail:
+       return -errno;
 }
 
 int
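
Aside on the alignment requirement documented in dev_lmt_setup() above, not part
of the patch: aligning the memzone to LMT_REGION_SIZE is what lets a core recover
its LMT base from any line address with a single mask, even after every line has
been consumed. A minimal self-contained sketch of that arithmetic follows; the
EX_* constants and the helper name are assumptions chosen for illustration, not
the actual ROC values.

#include <stdint.h>

#define EX_LMT_LINE_SZ   128ULL   /* assumed size of one LMT line */
#define EX_NUM_LMT_LINES 2048ULL  /* assumed number of LMT lines */
#define EX_LMT_REGION_SZ (EX_NUM_LMT_LINES * EX_LMT_LINE_SZ)

/* Valid only because the region base is aligned to EX_LMT_REGION_SZ:
 * masking any address inside the region yields the base again.
 */
static inline uint64_t
ex_lmt_base_from_line(uint64_t line_addr)
{
        return line_addr & ~(EX_LMT_REGION_SZ - 1);
}
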
@@ -1125,8 +1150,12 @@ dev_init(struct dev *dev, struct plt_pci_device *pci_dev)
        }
        dev->mbox_active = 1;
 
+       rc = npa_lf_init(dev, pci_dev);
+       if (rc)
+               goto iounmap;
+
        /* Setup LMT line base */
-       rc = dev_lmt_setup(pci_dev, dev);
+       rc = dev_lmt_setup(dev);
        if (rc)
                goto iounmap;
 
@@ -1150,6 +1179,17 @@ dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
        struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct mbox *mbox;
 
+       /* Defer fini if other references to this dev's NPA LF are active */
+       if (idev_npa_lf_active(dev) > 1)
+               return -EAGAIN;
+
+       /* Clear references to this pci dev */
+       npa_lf_fini();
+
+       /* Release the memory allocated for the LMT region */
+       if (dev->lmt_mz)
+               plt_memzone_free(dev->lmt_mz);
+
        mbox_unregister_irq(pci_dev, dev);
 
        if (!dev_is_vf(dev))
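
Closing note on the dev_fini() changes above, again illustrative rather than part
of the patch: teardown of the shared NPA LF is deferred with -EAGAIN while other
devices still reference it. The sketch below shows that reference-count guard in
isolation; struct ex_shared_lf and ex_lf_fini() are made-up names.

#include <errno.h>

struct ex_shared_lf {
        int refcnt; /* number of devices currently using the LF */
};

static int
ex_lf_fini(struct ex_shared_lf *lf)
{
        /* Mirrors the idev_npa_lf_active(dev) > 1 check: while other
         * devices still hold references, ask the caller to retry later.
         */
        if (lf->refcnt > 1)
                return -EAGAIN;

        /* Last user: safe to release the resource. */
        lf->refcnt = 0;
        return 0;
}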