-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
}
+/* Release the per-hwfn debug user-info buffer allocated by
+ * OSAL_DBG_ALLOC_USER_DATA() during resource allocation, and clear the
+ * pointer so a repeated resc-free cannot double-free it.
+ */
+static void ecore_dbg_user_data_free(struct ecore_hwfn *p_hwfn)
+{
+	OSAL_FREE(p_hwfn->p_dev, p_hwfn->dbg_user_info);
+	p_hwfn->dbg_user_info = OSAL_NULL;
+}
+
void ecore_resc_free(struct ecore_dev *p_dev)
{
int i;
ecore_l2_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
ecore_dcbx_info_free(p_hwfn);
+ ecore_dbg_user_data_free(p_hwfn);
/* @@@TBD Flush work-queue ? */
/* destroy doorbell recovery mechanism */
"Failed to allocate memory for dcbx structure\n");
goto alloc_err;
}
- }
+
+ rc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate dbg user info structure\n");
+ goto alloc_err;
+ }
+ } /* hwfn loop */
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->reset_stats));
ECORE_ROCE_EDPM_MODE_DISABLE = 2,
};
+/* Return true when EDPM may be used on this hwfn, i.e. it has been
+ * disabled neither via DCBX (dcbx_no_edpm) nor because of doorbell BAR
+ * constraints (db_bar_no_edpm).
+ */
+bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn)
+{
+	if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
+		return false;
+
+	return true;
+}
+
static enum _ecore_status_t
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
DP_INFO(p_hwfn,
" dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
p_hwfn->dpi_size, p_hwfn->dpi_count,
- ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ (!ecore_edpm_enabled(p_hwfn)) ?
"disabled" : "enabled");
/* Check return codes from above calls */
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 ether_type;
int i;
if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
if (rc != ECORE_SUCCESS)
return rc;
+ if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits) ||
+ OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+ &p_dev->mf_bits))) {
+ if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits))
+ ether_type = ETHER_TYPE_VLAN;
+ else
+ ether_type = ETHER_TYPE_QINQ;
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+ ether_type);
+ }
+
ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms);
rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
}
}
- /* Log and clean previous pglue_b errors if such exist */
- ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
- ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+ /* Log and clear previous pglue_b errors if such exist */
+ ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
/* Enable the PF's internal FID_enable in the PXP */
rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
if (rc != ECORE_SUCCESS)
goto load_err;
+ /* Clear the pglue_b was_error indication.
+ * In E4 it must be done after the BME and the internal
+ * FID_enable for the PF are set, since VDMs may cause the
+ * indication to be set again.
+ */
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_init_done = true;
}
+ if (IS_PF(p_dev)) {
+ /* Get pre-negotiated values for stag, bandwidth etc. */
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
+ &resp, ¶m);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+ }
+
if (IS_PF(p_dev)) {
p_hwfn = ECORE_LEADING_HWFN(p_dev);
drv_mb_param = STORM_FW_VERSION;
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update firmware version\n");
- if (!b_default_mtu)
+ if (!b_default_mtu) {
rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.mtu);
- if (rc != ECORE_SUCCESS)
- DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ }
rc = ecore_mcp_ov_update_driver_state(p_hwfn,
p_hwfn->p_main_ptt,
ECORE_OV_DRIVER_STATE_DISABLED);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update driver state\n");
+
+ rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_OV_ESWITCH_NONE);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
}
return rc;
rc2 = ECORE_UNKNOWN_ERROR;
}
+ OSAL_DPC_SYNC(p_hwfn);
+
+ /* After this point we don't expect the FW to send us async
+ * events
+ */
+
/* perform debug action after PF stop was sent */
OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
}
- if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
- feat_num[ECORE_FCOE_CQ] =
- OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
+ if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
+ ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) {
+ u32 *p_storage_feat = ECORE_IS_FCOE_PERSONALITY(p_hwfn) ?
+ &feat_num[ECORE_FCOE_CQ] :
+ &feat_num[ECORE_ISCSI_CQ];
+ u32 limit = sb_cnt.cnt;
- if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
- feat_num[ECORE_ISCSI_CQ] =
- OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
+ /* The number of queues should not exceed the number of FP SBs.
+ * In storage target, the queues are divided into pairs of a CQ
+ * and a CmdQ, and each pair uses a single SB. The limit in
+ * this case should allow a max ratio of 2:1 instead of 1:1.
+ */
+ if (p_hwfn->p_dev->b_is_target)
+ limit *= 2;
+ *p_storage_feat = OSAL_MIN_T(u32, limit,
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+
+ /* @DPDK */
+ /* The size of "cq_cmdq_sb_num_arr" in the fcoe/iscsi init
+ * ramrod is limited to "NUM_OF_GLOBAL_QUEUES / 2".
+ */
+ *p_storage_feat = OSAL_MIN_T(u32, *p_storage_feat,
+ (NUM_OF_GLOBAL_QUEUES / 2));
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
"#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+
+ /* Workaround for MFW issue where PF FLR does not cleanup
+ * IGU block
+ */
+ if (!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP))
+ ecore_pf_flr_igu_cleanup(p_hwfn);
}
/* Check if mdump logs/data are present and update the epoch value */
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
p_dev->allow_mdump = p_params->allow_mdump;
p_hwfn->b_en_pacing = p_params->b_en_pacing;
+ p_dev->b_is_target = p_params->b_is_target;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;