net/qede/base: support mapped doorbell BARs for VF
author    Rasesh Mody <rasesh.mody@cavium.com>    Tue, 19 Sep 2017 01:51:33 +0000 (18:51 -0700)
committer Ferruh Yigit <ferruh.yigit@intel.com>   Fri, 6 Oct 2017 00:49:49 +0000 (02:49 +0200)
Determine whether the VF utilizes doorbells via the limited register bar or
via the doorbell bar, and return the size of the HW doorbell bar to the VF
via the acquire response. Based on that size, limit the VF CIDs to an amount
that ensures the doorbells for all CIDs fall within the bar.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
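
To make the arithmetic concrete, below is a minimal standalone sketch of the
PF-side CID clamp; the register readout, doorbell stride, CMT flag and
requested CID count are illustrative assumptions, not values taken from this
patch or from hardware.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* PGLUE_B_REG_VF_BAR1_SIZE encodes log2(bar size) - 11, so a readout
         * of 1 means the VF doorbell bar is 1 << (1 + 11) = 4096 bytes.
         */
        uint32_t bar1_size_reg = 1;     /* assumed register readout */
        uint32_t db_stride = 16;        /* assumed bytes per CID doorbell */
        int is_cmt = 1;                 /* assumed 100G, two hw-functions */
        uint8_t requested_cids = 192;   /* assumed VF acquire request */

        uint32_t bar_size = bar1_size_reg ? 1u << (bar1_size_reg + 11) : 0;
        if (is_cmt)
                bar_size /= 2;          /* each hw-function gets half the bar */

        uint8_t num_cids = requested_cids;
        if (bar_size / db_stride < 256 && bar_size / db_stride < num_cids)
                num_cids = (uint8_t)(bar_size / db_stride);

        printf("bar %u bytes -> at most %u CIDs\n",
               bar_size, (unsigned int)num_cids);
        return 0;
}

With these assumed values a 4KB doorbell bar, halved for the CMT case, leaves
room for 128 doorbells, so the acquire response caps the VF at 128 CIDs even
though 192 were requested.
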
drivers/net/qede/base/ecore.h
drivers/net/qede/base/ecore_dev.c
drivers/net/qede/base/ecore_sriov.c
drivers/net/qede/base/ecore_vf.c
drivers/net/qede/base/ecore_vf.h
drivers/net/qede/base/ecore_vfpf_if.h

drivers/net/qede/base/ecore.h
index 3bc1b20..3b51fc2 100644 (file)
@@ -546,6 +546,11 @@ struct ecore_ufp_info {
        u8 tc;
 };
 
+enum BAR_ID {
+       BAR_ID_0,       /* used for GRC */
+       BAR_ID_1        /* Used for doorbells */
+};
+
 struct ecore_hwfn {
        struct ecore_dev                *p_dev;
        u8                              my_id;          /* ID inside the PF */
drivers/net/qede/base/ecore_dev.c
index 283c65b..0568470 100644 (file)
@@ -363,11 +363,6 @@ void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
 /* Derived */
 #define ECORE_MIN_PWM_REGION   (ECORE_WID_SIZE * ECORE_MIN_DPIS)
 
-enum BAR_ID {
-       BAR_ID_0,               /* used for GRC */
-       BAR_ID_1                /* Used for doorbells */
-};
-
 static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
                             struct ecore_ptt *p_ptt,
                             enum BAR_ID bar_id)
@@ -376,13 +371,8 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
                       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
        u32 val;
 
-       if (IS_VF(p_hwfn->p_dev)) {
-               /* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be
-                * read from actual register, but we're currently not using
-                * it for actual doorbelling.
-                */
-               return 1 << 17;
-       }
+       if (IS_VF(p_hwfn->p_dev))
+               return ecore_vf_hw_bar_size(p_hwfn, bar_id);
 
        val = ecore_rd(p_hwfn, p_ptt, bar_reg);
        if (val)
drivers/net/qede/base/ecore_sriov.c
index ed9ace2..a1d4982 100644 (file)
@@ -1538,7 +1538,62 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
        OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
 }
 
+/* Returns either 0, or log2(size) */
+static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt)
+{
+       u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+       if (val)
+               return val + 11;
+       return 0;
+}
+
+static void
+ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  struct ecore_vf_info *p_vf,
+                                  struct vf_pf_resc_request *p_req,
+                                  struct pf_vf_resc *p_resp)
+{
+       u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+       u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
+                    DB_ADDR_VF(0, DQ_DEMS_LEGACY);
+       u32 bar_size;
+
+       p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
+
+       /* If VF didn't bother asking for QIDs then don't bother limiting
+        * number of CIDs. The VF doesn't care about the number, and this
+        * has the likely result of causing an additional acquisition.
+        */
+       if (!(p_vf->acquire.vfdev_info.capabilities &
+             VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+               return;
+
+       /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
+        * that would make sure doorbells for all CIDs fall within the bar.
+        * If it doesn't, make sure regview window is sufficient.
+        */
+       if (p_vf->acquire.vfdev_info.capabilities &
+           VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+               bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
+               if (bar_size)
+                       bar_size = 1 << bar_size;
+
+               if (ECORE_IS_CMT(p_hwfn->p_dev))
+                       bar_size /= 2;
+       } else {
+               bar_size = PXP_VF_BAR0_DQ_LENGTH;
+       }
+
+       if (bar_size / db_size < 256)
+               p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
+                                             (u8)(bar_size / db_size));
+}
+
 static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
                                        struct ecore_vf_info *p_vf,
                                        struct vf_pf_resc_request *p_req,
                                        struct pf_vf_resc *p_resp)
@@ -1573,9 +1628,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
        p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
                                              p_req->num_vlan_filters);
 
-       p_resp->num_cids =
-               OSAL_MIN_T(u8, p_req->num_cids,
-                          p_hwfn->pf_params.eth_pf_params.num_vf_cons);
+       ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
 
        /* This isn't really needed/enforced, but some legacy VFs might depend
         * on the correct filling of this field.
@@ -1739,6 +1792,10 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
        if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
                pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
 
+       /* Share the size of the doorbell bar with the VF */
+       resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
+                                                            p_ptt);
+
        ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
 
        OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
@@ -1764,7 +1821,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
        /* Fill resources available to VF; Make sure there are enough to
         * satisfy the VF's request.
         */
-       vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, vf,
+       vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
                                                    &req->resc_request, resc);
        if (vfpf_status != PFVF_STATUS_SUCCESS)
                goto out;
drivers/net/qede/base/ecore_vf.c
index e84f97a..b78d735 100644 (file)
@@ -151,6 +151,69 @@ static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn,
        p_qid_tlv->qid = p_cid->qid_usage_idx;
 }
 
+enum _ecore_status_t _ecore_vf_pf_release(struct ecore_hwfn *p_hwfn,
+                                         bool b_final)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *resp;
+       struct vfpf_first_tlv *req;
+       u32 size;
+       enum _ecore_status_t rc;
+
+       /* clear mailbox and prep first tlv */
+       req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+       /* add list termination tlv */
+       ecore_add_tlv(&p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       resp = &p_iov->pf2vf_reply->default_resp;
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+       if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
+               rc = ECORE_AGAIN;
+
+       ecore_vf_pf_req_end(p_hwfn, rc);
+       if (!b_final)
+               return rc;
+
+       p_hwfn->b_int_enabled = 0;
+
+       if (p_iov->vf2pf_request)
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_iov->vf2pf_request,
+                                      p_iov->vf2pf_request_phys,
+                                      sizeof(union vfpf_tlvs));
+       if (p_iov->pf2vf_reply)
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_iov->pf2vf_reply,
+                                      p_iov->pf2vf_reply_phys,
+                                      sizeof(union pfvf_tlvs));
+
+       if (p_iov->bulletin.p_virt) {
+               size = sizeof(struct ecore_bulletin_content);
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_iov->bulletin.p_virt,
+                                      p_iov->bulletin.phys,
+                                      size);
+       }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+       OSAL_MUTEX_DEALLOC(&p_iov->mutex);
+#endif
+
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
+       p_hwfn->vf_iov_info = OSAL_NULL;
+
+       return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
+{
+       return _ecore_vf_pf_release(p_hwfn, true);
+}
+
 #define VF_ACQUIRE_THRESH 3
 static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
                                            struct vf_pf_resc_request *p_req,
@@ -217,6 +280,11 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
        /* Fill capability field with any non-deprecated config we support */
        req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
 
+       /* If we've mapped the doorbell bar, try using queue qids */
+       if (p_iov->b_doorbell_bar)
+               req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
+                                               VFPF_ACQUIRE_CAP_QUEUE_QIDS;
+
        /* pf 2 vf bulletin board address */
        req->bulletin_addr = p_iov->bulletin.phys;
        req->bulletin_size = p_iov->bulletin.size;
@@ -380,10 +448,28 @@ exit:
        return rc;
 }
 
+u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
+                        enum BAR_ID bar_id)
+{
+       u32 bar_size;
+
+       /* Regview size is fixed */
+       if (bar_id == BAR_ID_0)
+               return 1 << 17;
+
+       /* Doorbell is received from PF */
+       bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
+       if (bar_size)
+               return 1 << bar_size;
+       return 0;
+}
+
 enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
 {
+       struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
        struct ecore_vf_iov *p_iov;
        u32 reg;
+       enum _ecore_status_t rc;
 
        /* Set number of hwfns - might be overridden once leading hwfn learns
         * actual configuration from PF.
@@ -391,10 +477,6 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
        if (IS_LEAD_HWFN(p_hwfn))
                p_hwfn->p_dev->num_hwfns = 1;
 
-       /* Set the doorbell bar. Assumption: regview is set */
-       p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
-           PXP_VF_BAR0_START_DQ;
-
        reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
        p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
 
@@ -409,6 +491,31 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
                return ECORE_NOMEM;
        }
 
+       /* Doorbells are tricky; Upper-layer has already set the hwfn doorbell
+        * value, but there are several incompatibility scenarios where that
+        * would be incorrect and we'd need to override it.
+        */
+       if (p_hwfn->doorbells == OSAL_NULL) {
+               p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+                                                    PXP_VF_BAR0_START_DQ;
+       } else if (p_hwfn == p_lead) {
+               /* For leading hw-function, value is always correct, but need
+                * to handle scenario where legacy PF would not support 100g
+                * mapped bars later.
+                */
+               p_iov->b_doorbell_bar = true;
+       } else {
+               /* here, value would be correct ONLY if the leading hwfn
+                * received indication that mapped-bars are supported.
+                */
+               if (p_lead->vf_iov_info->b_doorbell_bar)
+                       p_iov->b_doorbell_bar = true;
+               else
+                       p_hwfn->doorbells = (u8 OSAL_IOMEM *)
+                                           p_hwfn->regview +
+                                           PXP_VF_BAR0_START_DQ;
+       }
+
        /* Allocate vf2pf msg */
        p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
                                                         &p_iov->
@@ -460,7 +567,35 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
 
        p_hwfn->hw_info.personality = ECORE_PCI_ETH;
 
-       return ecore_vf_pf_acquire(p_hwfn);
+       rc = ecore_vf_pf_acquire(p_hwfn);
+
+       /* If VF is 100g using a mapped bar and PF is too old to support that,
+        * acquisition would succeed - but the VF would have no way of knowing
+        * the size of the doorbell bar configured in HW and thus will not
+        * know how to split it for the 2nd hw-function.
+        * In this case we re-try without the indication of the mapped
+        * doorbell.
+        */
+       if (rc == ECORE_SUCCESS &&
+           p_iov->b_doorbell_bar &&
+           !ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
+           ECORE_IS_CMT(p_hwfn->p_dev)) {
+               rc = _ecore_vf_pf_release(p_hwfn, false);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+
+               p_iov->b_doorbell_bar = false;
+               p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+                                                    PXP_VF_BAR0_START_DQ;
+               rc = ecore_vf_pf_acquire(p_hwfn);
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
+                  p_hwfn->regview, p_hwfn->doorbells,
+                  p_hwfn->p_dev->doorbells);
+
+       return rc;
 
 free_vf2pf_request:
        OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
@@ -1304,59 +1439,6 @@ exit:
        return rc;
 }
 
-enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
-{
-       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
-       struct pfvf_def_resp_tlv *resp;
-       struct vfpf_first_tlv *req;
-       u32 size;
-       enum _ecore_status_t rc;
-
-       /* clear mailbox and prep first tlv */
-       req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
-
-       /* add list termination tlv */
-       ecore_add_tlv(&p_iov->offset,
-                     CHANNEL_TLV_LIST_END,
-                     sizeof(struct channel_list_end_tlv));
-
-       resp = &p_iov->pf2vf_reply->default_resp;
-       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
-
-       if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
-               rc = ECORE_AGAIN;
-
-       ecore_vf_pf_req_end(p_hwfn, rc);
-
-       p_hwfn->b_int_enabled = 0;
-
-       if (p_iov->vf2pf_request)
-               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
-                                      p_iov->vf2pf_request,
-                                      p_iov->vf2pf_request_phys,
-                                      sizeof(union vfpf_tlvs));
-       if (p_iov->pf2vf_reply)
-               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
-                                      p_iov->pf2vf_reply,
-                                      p_iov->pf2vf_reply_phys,
-                                      sizeof(union pfvf_tlvs));
-
-       if (p_iov->bulletin.p_virt) {
-               size = sizeof(struct ecore_bulletin_content);
-               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
-                                      p_iov->bulletin.p_virt,
-                                      p_iov->bulletin.phys, size);
-       }
-
-#ifdef CONFIG_ECORE_LOCK_ALLOC
-       OSAL_MUTEX_DEALLOC(&p_iov->mutex);
-#endif
-
-       OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
-
-       return rc;
-}
-
 void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
                              struct ecore_filter_mcast *p_filter_cmd)
 {
drivers/net/qede/base/ecore_vf.h
index 8c44d37..fdb0fe0 100644 (file)
@@ -49,6 +49,11 @@ struct ecore_vf_iov {
         * compatibility [with older PFs] we'd still need to store these.
         */
        struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
+
+       /* Determines whether VF utilizes doorbells via limited register
+        * bar or via the doorbell bar.
+        */
+       bool b_doorbell_bar;
 };
 
 /**
@@ -304,5 +309,8 @@ ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
                                struct ecore_tunnel_info *p_tunn);
 
 void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
+
+u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
+                    enum BAR_ID bar_id);
 #endif
 #endif /* __ECORE_VF_H__ */
drivers/net/qede/base/ecore_vfpf_if.h
index d632423..3ccc766 100644 (file)
@@ -101,6 +101,12 @@ struct vfpf_acquire_tlv {
         * this, and use the legacy CID scheme.
         */
 #define VFPF_ACQUIRE_CAP_QUEUE_QIDS    (1 << 2)
+
+       /* The VF is using the physical bar. While this is mostly internal
+        * to the VF, might affect the number of CIDs supported assuming
+        * QUEUE_QIDS is set.
+        */
+#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR  (1 << 3)
                u64 capabilities;
                u8 fw_major;
                u8 fw_minor;
@@ -190,7 +196,8 @@ struct pfvf_acquire_resp_tlv {
                u16 chip_rev;
                u8 dev_type;
 
-               u8 padding;
+               /* Doorbell bar size configured in HW: log2(size) or 0 */
+               u8 bar_size;
 
                struct pfvf_stats_info stats_info;