net/qede/base: refine error handling
[dpdk.git] / drivers / net / qede / base / ecore_sriov.c
index 53d6b36..0279709 100644 (file)
 #include "ecore_init_fw_funcs.h"
 #include "ecore_sp_commands.h"
 
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+                                                 u8 opcode,
+                                                 __le16 echo,
+                                                 union event_ring_data *data,
+                                                 u8 fw_return_code);
+
 const char *ecore_channel_tlvs_string[] = {
        "CHANNEL_TLV_NONE",     /* ends tlv sequence */
        "CHANNEL_TLV_ACQUIRE",
@@ -584,13 +590,15 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
 
        p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
        if (!p_sriov) {
-               DP_NOTICE(p_hwfn, true,
-                         "Failed to allocate `struct ecore_sriov'\n");
+               DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
                return ECORE_NOMEM;
        }
 
        p_hwfn->pf_iov_info = p_sriov;
 
+       ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+                                   ecore_sriov_eqe_event);
+
        return ecore_iov_allocate_vfdb(p_hwfn);
 }
 
@@ -604,6 +612,8 @@ void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
 
 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
 {
+       ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
        if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
                ecore_iov_free_vfdb(p_hwfn);
                OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
@@ -637,7 +647,7 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
        p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
                                        sizeof(*p_dev->p_iov_info));
        if (!p_dev->p_iov_info) {
-               DP_NOTICE(p_hwfn, true,
+               DP_NOTICE(p_hwfn, false,
                          "Can't support IOV due to lack of memory\n");
                return ECORE_NOMEM;
        }
@@ -854,6 +864,11 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
        enum _ecore_status_t rc = ECORE_SUCCESS;
 
+       /* It's possible VF was previously considered malicious -
+        * clear the indication even if we're only going to disable VF.
+        */
+       vf->b_malicious = false;
+
        if (vf->to_disable)
                return ECORE_SUCCESS;
 
@@ -866,8 +881,6 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
 
        ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
 
-       /* It's possible VF was previously considered malicious */
-       vf->b_malicious = false;
        rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
                                             vf->abs_vf_id, vf->num_sbs);
        if (rc != ECORE_SUCCESS)
@@ -1370,7 +1383,7 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
        mbx->sw_mbx.response_size =
            length + sizeof(struct channel_list_end_tlv);
 
-       if (!p_hwfn->p_dev->b_hw_channel)
+       if (!p_vf->b_hw_channel)
                return;
 #endif
 
@@ -1386,14 +1399,18 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
                             (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                             &params);
 
-       ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
-                            mbx->req_virt->first_tlv.reply_address,
-                            sizeof(u64) / 4, &params);
-
+       /* Once the PF copies the rc to the VF, the latter can continue
+        * and send an additional message. So we have to make sure the
+        * channel is re-set to ready prior to that.
+        */
        REG_WR(p_hwfn,
               GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
 
+       ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+                            mbx->req_virt->first_tlv.reply_address,
+                            sizeof(u64) / 4, &params);
+
        OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
 }
 
@@ -1527,7 +1544,62 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
        OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
 }
 
+/* Read the VF doorbell BAR (BAR1) size indication from PGLUE_B.
+ * Returns 0 when the register reads as zero (no size reported),
+ * otherwise log2 of the BAR size — the raw register value plus 11,
+ * i.e. the register presumably encodes the size in 2KB granularity
+ * (TODO confirm the +11 encoding against the HW register spec).
+ */
+static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt)
+{
+       u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+       if (val)
+               return val + 11;
+       return 0;
+}
+
+/* Fill p_resp->num_cids for a VF ACQUIRE request: start from the
+ * minimum of what the VF asked for and the PF's configured per-VF
+ * connection count, then — for QIDs-capable VFs — cap it further so
+ * that every CID's doorbell fits inside the doorbell area the VF can
+ * actually address (its physical doorbell bar, or the regview DQ
+ * window for legacy mapping).
+ */
+static void
+ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  struct ecore_vf_info *p_vf,
+                                  struct vf_pf_resc_request *p_req,
+                                  struct pf_vf_resc *p_resp)
+{
+       u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+       /* Stride between two consecutive CIDs' doorbell addresses */
+       u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
+                    DB_ADDR_VF(0, DQ_DEMS_LEGACY);
+       u32 bar_size;
+
+       p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
+
+       /* If VF didn't bother asking for QIDs then don't bother limiting
+        * number of CIDs. The VF doesn't care about the number, and this
+        * has the likely result of causing an additional acquisition.
+        */
+       if (!(p_vf->acquire.vfdev_info.capabilities &
+             VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+               return;
+
+       /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
+        * that would make sure doorbells for all CIDs fall within the bar.
+        * If it doesn't, make sure regview window is sufficient.
+        */
+       if (p_vf->acquire.vfdev_info.capabilities &
+           VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+               /* Helper returns log2 of the bar size (or 0) — convert
+                * to bytes before comparing against the doorbell stride.
+                */
+               bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
+               if (bar_size)
+                       bar_size = 1 << bar_size;
+
+               /* On CMT (100g) devices each hwfn gets half the bar */
+               if (ECORE_IS_CMT(p_hwfn->p_dev))
+                       bar_size /= 2;
+       } else {
+               bar_size = PXP_VF_BAR0_DQ_LENGTH;
+       }
+
+       /* 256 doorbells always fit (num_cids is a u8), so only clamp
+        * when the bar can hold fewer than that.
+        */
+       if (bar_size / db_size < 256)
+               p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
+                                             (u8)(bar_size / db_size));
+}
+
 static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
                                        struct ecore_vf_info *p_vf,
                                        struct vf_pf_resc_request *p_req,
                                        struct pf_vf_resc *p_resp)
@@ -1562,9 +1634,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
        p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
                                              p_req->num_vlan_filters);
 
-       p_resp->num_cids =
-               OSAL_MIN_T(u8, p_req->num_cids,
-                          p_hwfn->pf_params.eth_pf_params.num_vf_cons);
+       ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
 
        /* This isn't really needed/enforced, but some legacy VFs might depend
         * on the correct filling of this field.
@@ -1687,7 +1757,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
        }
 
        /* On 100g PFs, prevent old VFs from loading */
-       if ((p_hwfn->p_dev->num_hwfns > 1) &&
+       if (ECORE_IS_CMT(p_hwfn->p_dev) &&
            !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
                DP_INFO(p_hwfn,
                        "VF[%d] is running an old driver that doesn't support"
@@ -1715,11 +1785,11 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
        /* fill in pfdev info */
        pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
        pfdev_info->db_size = 0;        /* @@@ TBD MichalK Vf Doorbells */
-       pfdev_info->indices_per_sb = PIS_PER_SB;
+       pfdev_info->indices_per_sb = PIS_PER_SB_E4;
 
        pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
                                   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
-       if (p_hwfn->p_dev->num_hwfns > 1)
+       if (ECORE_IS_CMT(p_hwfn->p_dev))
                pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
 
        /* Share our ability to use multiple queue-ids only with VFs
@@ -1728,6 +1798,10 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
        if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
                pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
 
+       /* Share the sizes of the bars with VF */
+       resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
+                                                            p_ptt);
+
        ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
 
        OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
@@ -1753,7 +1827,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
        /* Fill resources available to VF; Make sure there are enough to
         * satisfy the VF's request.
         */
-       vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, vf,
+       vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
                                                    &req->resc_request, resc);
        if (vfpf_status != PFVF_STATUS_SUCCESS)
                goto out;
@@ -1893,7 +1967,8 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
        if (!p_vf->vport_instance)
                return ECORE_INVAL;
 
-       if (events & (1 << MAC_ADDR_FORCED)) {
+       if ((events & (1 << MAC_ADDR_FORCED)) ||
+           p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) {
                /* Since there's no way [currently] of removing the MAC,
                 * we can always assume this means we need to force it.
                 */
@@ -1914,7 +1989,11 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
                        return rc;
                }
 
-               p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+               if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+                       p_vf->configured_features |=
+                               1 << VFPF_BULLETIN_MAC_ADDR;
+               else
+                       p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
        }
 
        if (events & (1 << VLAN_ADDR_FORCED)) {
@@ -2105,6 +2184,7 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
        u8 status = PFVF_STATUS_SUCCESS;
        enum _ecore_status_t rc;
 
+       OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
        vf->vport_instance--;
        vf->spoof_chk = false;
 
@@ -3137,8 +3217,8 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
                                   "Upper-layer prevents said VF"
                                   " configuration\n");
                else
-                       DP_NOTICE(p_hwfn, true,
-                                 "No feature tlvs found for vport update\n");
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "No feature tlvs found for vport update\n");
                status = PFVF_STATUS_NOT_SUPPORTED;
                goto out;
        }
@@ -3343,12 +3423,13 @@ static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
                goto out;
        }
 
-       /* Update shadow copy of the VF configuration */
+       /* Update shadow copy of the VF configuration. In case shadow indicates
+        * the action should be blocked return success to VF to imitate the
+        * firmware behaviour in such case.
+        */
        if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
-           ECORE_SUCCESS) {
-               status = PFVF_STATUS_FAILURE;
+           ECORE_SUCCESS)
                goto out;
-       }
 
        /* Determine if the unicast filtering is acceptible by PF */
        if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
@@ -3723,11 +3804,11 @@ static enum _ecore_status_t
 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
                          struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
 {
-       u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+       u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
        int i, cnt;
 
        /* Read initial consumers & producers */
-       for (i = 0; i < MAX_NUM_VOQS; i++) {
+       for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
                u32 prod;
 
                cons[i] = ecore_rd(p_hwfn, p_ptt,
@@ -3742,7 +3823,7 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
        /* Wait for consumers to pass the producers */
        i = 0;
        for (cnt = 0; cnt < 50; cnt++) {
-               for (; i < MAX_NUM_VOQS; i++) {
+               for (; i < MAX_NUM_VOQS_E4; i++) {
                        u32 tmp;
 
                        tmp = ecore_rd(p_hwfn, p_ptt,
@@ -3752,7 +3833,7 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
                                break;
                }
 
-               if (i == MAX_NUM_VOQS)
+               if (i == MAX_NUM_VOQS_E4)
                        break;
 
                OSAL_MSLEEP(20);
@@ -4181,24 +4262,31 @@ static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
 {
        struct ecore_vf_info *p_vf;
 
-       p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
+       p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
 
        if (!p_vf)
                return;
 
-       DP_INFO(p_hwfn,
-               "VF [%d] - Malicious behavior [%02x]\n",
-               p_vf->abs_vf_id, p_data->errId);
+       if (!p_vf->b_malicious) {
+               DP_NOTICE(p_hwfn, false,
+                         "VF [%d] - Malicious behavior [%02x]\n",
+                         p_vf->abs_vf_id, p_data->err_id);
 
-       p_vf->b_malicious = true;
+               p_vf->b_malicious = true;
+       } else {
+               DP_INFO(p_hwfn,
+                       "VF [%d] - Malicious behavior [%02x]\n",
+                       p_vf->abs_vf_id, p_data->err_id);
+       }
 
        OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
 }
 
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
-                                          u8 opcode,
-                                          __le16 echo,
-                                          union event_ring_data *data)
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+                                                 u8 opcode,
+                                                 __le16 echo,
+                                                 union event_ring_data *data,
+                                                 u8 OSAL_UNUSED fw_return_code)
 {
        switch (opcode) {
        case COMMON_EVENT_VF_PF_CHANNEL:
@@ -4237,7 +4325,7 @@ u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
                        return i;
 
 out:
-       return E4_MAX_NUM_VFS;
+       return MAX_NUM_VFS_E4;
 }
 
 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
@@ -4286,7 +4374,11 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
                return;
        }
 
-       feature = 1 << MAC_ADDR_FORCED;
+       if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+               feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+       else
+               feature = 1 << MAC_ADDR_FORCED;
+
        OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
 
        vf_info->bulletin.p_virt->valid_bitmap |= feature;
@@ -4327,9 +4419,13 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
 
        vf_info->bulletin.p_virt->valid_bitmap |= feature;
 
+       if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+               ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+
        return ECORE_SUCCESS;
 }
 
+#ifndef LINUX_REMOVE
 enum _ecore_status_t
 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
                                               bool b_untagged_only, int vfid)
@@ -4386,6 +4482,7 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
 
        *opaque_fid = vf_info->opaque_fid;
 }
+#endif
 
 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
                                        u16 pvid, int vfid)
@@ -4573,6 +4670,22 @@ u32 ecore_iov_pfvf_msg_length(void)
        return sizeof(union pfvf_tlvs);
 }
 
+/* Return a pointer to the (non-forced) MAC address published in the
+ * VF's bulletin board, or OSAL_NULL when the VF is invalid, has no
+ * bulletin, or the VFPF_BULLETIN_MAC_ADDR bit isn't set in the
+ * bulletin's valid bitmap. The returned pointer aliases the bulletin
+ * storage — caller must not free it.
+ */
+u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
+                                     u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf || !p_vf->bulletin.p_virt)
+               return OSAL_NULL;
+
+       if (!(p_vf->bulletin.p_virt->valid_bitmap &
+               (1 << VFPF_BULLETIN_MAC_ADDR)))
+               return OSAL_NULL;
+
+       return p_vf->bulletin.p_virt->mac;
+}
+
 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
 {
        struct ecore_vf_info *p_vf;
@@ -4606,6 +4719,7 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt,
                                                 int vfid, int val)
 {
+       struct ecore_mcp_link_state *p_link;
        struct ecore_vf_info *vf;
        u8 abs_vp_id = 0;
        enum _ecore_status_t rc;
@@ -4619,7 +4733,10 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
        if (rc != ECORE_SUCCESS)
                return rc;
 
-       return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+       p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+       return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+                                  p_link->speed);
 }
 
 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
@@ -4749,3 +4866,17 @@ ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
        else
                return 0;
 }
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/* Select per-VF whether the PF-VF mailbox uses the HW channel
+ * (b_is_hw = true) or the SW channel. Silently ignores an invalid
+ * vfid. The flag is later consulted when sending responses to the VF
+ * (presumably gating the DMAE copy in ecore_iov_send_response —
+ * confirm against the b_hw_channel usage there).
+ */
+void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
+                                bool b_is_hw)
+{
+       struct ecore_vf_info *vf_info;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+       if (!vf_info)
+               return;
+
+       vf_info->b_hw_channel = b_is_hw;
+}
+#endif