net/qede/base: upgrade to FW 8.37.7.0
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index a85d26d..b83f003 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "bcm_osal.h"
@@ -458,6 +456,12 @@ static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
        OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
 }
 
+static void ecore_dbg_user_data_free(struct ecore_hwfn *p_hwfn)
+{
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->dbg_user_info);
+       p_hwfn->dbg_user_info = OSAL_NULL;
+}
+
 void ecore_resc_free(struct ecore_dev *p_dev)
 {
        int i;
@@ -485,6 +489,7 @@ void ecore_resc_free(struct ecore_dev *p_dev)
                ecore_l2_free(p_hwfn);
                ecore_dmae_info_free(p_hwfn);
                ecore_dcbx_info_free(p_hwfn);
+               ecore_dbg_user_data_free(p_hwfn);
                /* @@@TBD Flush work-queue ? */
 
                /* destroy doorbell recovery mechanism */
@@ -513,11 +518,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
        /* feature flags */
        if (IS_ECORE_SRIOV(p_hwfn->p_dev))
                flags |= PQ_FLAGS_VFS;
+       if (IS_ECORE_PACING(p_hwfn))
+               flags |= PQ_FLAGS_RLS;
 
        /* protocol flags */
        switch (p_hwfn->hw_info.personality) {
        case ECORE_PCI_ETH:
-               flags |= PQ_FLAGS_MCOS;
+               if (!IS_ECORE_PACING(p_hwfn))
+                       flags |= PQ_FLAGS_MCOS;
                break;
        case ECORE_PCI_FCOE:
                flags |= PQ_FLAGS_OFLD;
@@ -526,11 +534,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
                flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
                break;
        case ECORE_PCI_ETH_ROCE:
-               flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD;
+               flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+               if (!IS_ECORE_PACING(p_hwfn))
+                       flags |= PQ_FLAGS_MCOS;
                break;
        case ECORE_PCI_ETH_IWARP:
-               flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
-                        PQ_FLAGS_OFLD;
+               flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+               if (!IS_ECORE_PACING(p_hwfn))
+                       flags |= PQ_FLAGS_MCOS;
                break;
        default:
                DP_ERR(p_hwfn, "unknown personality %d\n",
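
A minimal sketch (not part of the patch) of the flag selection above: with packet pacing enabled, PQ_FLAGS_RLS is set in the feature flags and the per-TC PQ_FLAGS_MCOS is suppressed for every Ethernet-based personality, so traffic-class queues give way to rate-limited queues. The flag values below are illustrative, not the driver's real encoding.

	#include <stdint.h>

	#define PQ_FLAGS_MCOS (1 << 1)	/* illustrative: one PQ per traffic class */
	#define PQ_FLAGS_RLS  (1 << 2)	/* illustrative: one PQ per rate limiter */

	/* Mirrors the ECORE_PCI_ETH arm of ecore_get_pq_flags() above. */
	static uint32_t eth_pq_flags(int pacing_enabled)
	{
		uint32_t flags = 0;

		if (pacing_enabled)
			flags |= PQ_FLAGS_RLS;	/* rate-limited PQs */
		else
			flags |= PQ_FLAGS_MCOS;	/* per-TC PQs */
		return flags;
	}
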
@@ -837,7 +848,7 @@ u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
        return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
 }
 
-u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
 {
        u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
 
@@ -847,6 +858,23 @@ u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
        return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
 }
 
+u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+{
+       u16 start_pq, pq, qm_pq_idx;
+
+       pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
+       start_pq = p_hwfn->qm_info.start_pq;
+       qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE;
+
+       if (qm_pq_idx >= p_hwfn->qm_info.num_pqs) {
+               DP_ERR(p_hwfn,
+                      "qm_pq_idx %d must be smaller than %d\n",
+                      qm_pq_idx, p_hwfn->qm_info.num_pqs);
+       }
+
+       return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id;
+}
+
 /* Functions for creating specific types of pqs */
 static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
 {
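
The helper added above recovers a vport id from a rate-limiter index by reversing the PQ numbering. A sketch of that arithmetic, assuming the rate-limited PQs form a contiguous range starting at the PQ_FLAGS_RLS base and that qm_pq_params[] is indexed relative to start_pq + CM_TX_PQ_BASE; every name below is invented for illustration:

	#include <stdint.h>

	/* rls_base: absolute CM PQ id of the first rate-limited PQ, i.e. what
	 * ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) returns in the driver.
	 */
	static uint8_t vport_of_rl(uint16_t rl, uint16_t rls_base,
				   uint16_t start_pq, uint16_t cm_tx_pq_base,
				   const uint8_t *pq_vport_ids, uint16_t num_pqs)
	{
		uint16_t pq = rls_base + rl;			/* absolute CM PQ id */
		uint16_t idx = pq - start_pq - cm_tx_pq_base;	/* qm_pq_params[] index */

		return idx < num_pqs ? pq_vport_ids[idx] : 0;	/* 0: out-of-range fallback */
	}
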
@@ -1313,7 +1341,14 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
                                  "Failed to allocate memory for dcbx structure\n");
                        goto alloc_err;
                }
-       }
+
+               rc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, false,
+                                 "Failed to allocate dbg user info structure\n");
+                       goto alloc_err;
+               }
+       } /* hwfn loop */
 
        p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
                                         sizeof(*p_dev->reset_stats));
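
The dbg_user_info buffer allocated here for each hwfn is the one released by the new ecore_dbg_user_data_free() earlier in this patch; that free routine also resets the pointer to OSAL_NULL so a repeated teardown cannot double-free. A generic sketch of the idiom, using plain libc instead of the OSAL_* wrappers:

	#include <stdlib.h>

	/* Counterpart of OSAL_FREE() followed by the OSAL_NULL assignment. */
	static void free_and_clear(void **pp)
	{
		free(*pp);
		*pp = NULL;	/* a second teardown pass becomes a no-op */
	}
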
@@ -2389,6 +2424,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
        bool b_default_mtu = true;
        struct ecore_hwfn *p_hwfn;
        enum _ecore_status_t rc = ECORE_SUCCESS;
+       u16 ether_type;
        int i;
 
        if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
@@ -2421,6 +2457,25 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
                if (rc != ECORE_SUCCESS)
                        return rc;
 
+               if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+                                                  &p_dev->mf_bits) ||
+                                    OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+                                                  &p_dev->mf_bits))) {
+                       if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+                                         &p_dev->mf_bits))
+                               ether_type = ETHER_TYPE_VLAN;
+                       else
+                               ether_type = ETHER_TYPE_QINQ;
+                       STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+                                    ether_type);
+                       STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+                                    ether_type);
+                       STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+                                    ether_type);
+                       STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+                                    ether_type);
+               }
+
                ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms);
 
                rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
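
A sketch of the tag-ethertype choice programmed into the PRS/NIG/PBF/DORQ runtime registers above. The numeric TPIDs are the IEEE-defined values; the patch's ETHER_TYPE_VLAN and ETHER_TYPE_QINQ macros are assumed to resolve to them:

	#include <stdint.h>

	static uint16_t mf_tag_ethertype(int mode_8021q)
	{
		return mode_8021q ? 0x8100	/* IEEE 802.1Q (ETHER_TYPE_VLAN) */
				  : 0x88A8;	/* IEEE 802.1ad QinQ (ETHER_TYPE_QINQ) */
	}
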
@@ -2490,9 +2545,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
                        }
                }
 
-               /* Log and clean previous pglue_b errors if such exist */
-               ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
-               ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+               /* Log and clear previous pglue_b errors if such exist */
+               ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
 
                /* Enable the PF's internal FID_enable in the PXP */
                rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
@@ -2500,6 +2554,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
                if (rc != ECORE_SUCCESS)
                        goto load_err;
 
+               /* Clear the pglue_b was_error indication.
+                * In E4 it must be done after the BME and the internal
+                * FID_enable for the PF are set, since VDMs may cause the
+                * indication to be set again.
+                */
+               ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
                switch (load_code) {
                case FW_MSG_CODE_DRV_LOAD_ENGINE:
                        rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -2563,6 +2624,20 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
                p_hwfn->hw_init_done = true;
        }
 
+       if (IS_PF(p_dev)) {
+               /* Get pre-negotiated values for stag, bandwidth etc. */
+               p_hwfn = ECORE_LEADING_HWFN(p_dev);
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                          "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+               rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                  DRV_MSG_CODE_GET_OEM_UPDATES,
+                                  1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
+                                  &resp, &param);
+               if (rc != ECORE_SUCCESS)
+                       DP_NOTICE(p_hwfn, false,
+                                 "Failed to send GET_OEM_UPDATES attention request\n");
+       }
+
        if (IS_PF(p_dev)) {
                p_hwfn = ECORE_LEADING_HWFN(p_dev);
                drv_mb_param = STORM_FW_VERSION;
@@ -2572,17 +2647,23 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
                if (rc != ECORE_SUCCESS)
                        DP_INFO(p_hwfn, "Failed to update firmware version\n");
 
-               if (!b_default_mtu)
+               if (!b_default_mtu) {
                        rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
                                                      p_hwfn->hw_info.mtu);
-               if (rc != ECORE_SUCCESS)
-                       DP_INFO(p_hwfn, "Failed to update default mtu\n");
+                       if (rc != ECORE_SUCCESS)
+                               DP_INFO(p_hwfn, "Failed to update default mtu\n");
+               }
 
                rc = ecore_mcp_ov_update_driver_state(p_hwfn,
                                                      p_hwfn->p_main_ptt,
                                                ECORE_OV_DRIVER_STATE_DISABLED);
                if (rc != ECORE_SUCCESS)
                        DP_INFO(p_hwfn, "Failed to update driver state\n");
+
+               rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+                                                ECORE_OV_ESWITCH_NONE);
+               if (rc != ECORE_SUCCESS)
+                       DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
        }
 
        return rc;
@@ -2960,15 +3041,30 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
                                   FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
        }
 
-       if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
-               feat_num[ECORE_FCOE_CQ] =
-                       OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
-                                                            ECORE_CMDQS_CQS));
+       if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
+           ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) {
+               u32 *p_storage_feat = ECORE_IS_FCOE_PERSONALITY(p_hwfn) ?
+                                     &feat_num[ECORE_FCOE_CQ] :
+                                     &feat_num[ECORE_ISCSI_CQ];
+               u32 limit = sb_cnt.cnt;
 
-       if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
-               feat_num[ECORE_ISCSI_CQ] =
-                       OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
-                                                            ECORE_CMDQS_CQS));
+               /* The number of queues should not exceed the number of FP SBs.
+                * In storage target, the queues are divided into pairs of a CQ
+                * and a CmdQ, and each pair uses a single SB. The limit in
+                * this case should allow a max ratio of 2:1 instead of 1:1.
+                */
+               if (p_hwfn->p_dev->b_is_target)
+                       limit *= 2;
+               *p_storage_feat = OSAL_MIN_T(u32, limit,
+                                            RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+
+               /* @DPDK */
+               /* The size of "cq_cmdq_sb_num_arr" in the fcoe/iscsi init
+                * ramrod is limited to "NUM_OF_GLOBAL_QUEUES / 2".
+                */
+               *p_storage_feat = OSAL_MIN_T(u32, *p_storage_feat,
+                                            (NUM_OF_GLOBAL_QUEUES / 2));
+       }
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
                   "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
@@ -3588,9 +3684,14 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
                break;
        case NVM_CFG1_GLOB_MF_MODE_UFP:
                p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
-                                        1 << ECORE_MF_UFP_SPECIFIC;
+                                        1 << ECORE_MF_UFP_SPECIFIC |
+                                        1 << ECORE_MF_8021Q_TAGGING;
+               break;
+       case NVM_CFG1_GLOB_MF_MODE_BD:
+               p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+                                        1 << ECORE_MF_LLH_PROTO_CLSS |
+                                        1 << ECORE_MF_8021AD_TAGGING;
                break;
-
        case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
                p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
                                         1 << ECORE_MF_LLH_PROTO_CLSS |
@@ -3619,6 +3720,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
         */
        switch (mf_mode) {
        case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+       case NVM_CFG1_GLOB_MF_MODE_BD:
                p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
                break;
        case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
@@ -3872,8 +3974,13 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
        bool drv_resc_alloc = p_params->drv_resc_alloc;
        enum _ecore_status_t rc;
 
+       if (IS_ECORE_PACING(p_hwfn)) {
+               DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_IOV,
+                          "Skipping IOV as packet pacing is requested\n");
+       }
+
        /* Since all information is common, only first hwfns should do this */
-       if (IS_LEAD_HWFN(p_hwfn)) {
+       if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) {
                rc = ecore_iov_hw_info(p_hwfn);
                if (rc != ECORE_SUCCESS) {
                        if (p_params->b_relaxed_probe)
@@ -3958,7 +4065,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
         * that can result in performance penalty in some cases. 4
         * represents a good tradeoff between performance and flexibility.
         */
-       p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+       if (IS_ECORE_PACING(p_hwfn))
+               p_hwfn->hw_info.num_hw_tc = 1;
+       else
+               p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
 
        /* start out with a single active tc. This can be increased either
         * by dcbx negotiation or by upper layer driver
@@ -4176,6 +4286,13 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
                rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
                if (rc != ECORE_SUCCESS)
                        DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+
+               /* Workaround for MFW issue where PF FLR does not cleanup
+                * IGU block
+                */
+               if (!(p_hwfn->mcp_info->capabilities &
+                     FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP))
+                       ecore_pf_flr_igu_cleanup(p_hwfn);
        }
 
        /* Check if mdump logs/data are present and update the epoch value */
@@ -4245,6 +4362,8 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
 
        p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
        p_dev->allow_mdump = p_params->allow_mdump;
+       p_hwfn->b_en_pacing = p_params->b_en_pacing;
+       p_dev->b_is_target = p_params->b_is_target;
 
        if (p_params->b_relaxed_probe)
                p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
@@ -4280,6 +4399,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
                                                          BAR_ID_1) / 2;
                p_doorbell = (void OSAL_IOMEM *)addr;
 
+               p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing;
                /* prepare second hw function */
                rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
                                             p_doorbell, p_params);