OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
}
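+/* Free the debug feature's user data and clear the pointer, so that a
+ * repeated resource-free call cannot double-free it.
+ */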
+static void ecore_dbg_user_data_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->dbg_user_info);
+ p_hwfn->dbg_user_info = OSAL_NULL;
+}
+
void ecore_resc_free(struct ecore_dev *p_dev)
{
int i;
ecore_l2_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
ecore_dcbx_info_free(p_hwfn);
+ ecore_dbg_user_data_free(p_hwfn);
/* @@@TBD Flush work-queue ? */
/* destroy doorbell recovery mechanism */
"Failed to allocate memory for dcbx structure\n");
goto alloc_err;
}
- }
+
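+ /* Allocate the per-hwfn user data consumed by the debug feature
+ * (used when parsing debug dumps).
+ */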
+ rc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate dbg user info structure\n");
+ goto alloc_err;
+ }
+ } /* hwfn loop */
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->reset_stats));
ECORE_ROCE_EDPM_MODE_DISABLE = 2,
};
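+
+/* EDPM is usable only when neither DCBx nor an insufficient doorbell BAR
+ * has forced it off for this hwfn.
+ */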
+bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn)
+{
+ if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
+ return false;
+
+ return true;
+}
+
static enum _ecore_status_t
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
DP_INFO(p_hwfn,
" dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
p_hwfn->dpi_size, p_hwfn->dpi_count,
- ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ !ecore_edpm_enabled(p_hwfn) ?
"disabled" : "enabled");
/* Check return codes from above calls */
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 ether_type;
int i;
if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
if (rc != ECORE_SUCCESS)
return rc;
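+
+ /* In 802.1Q/802.1ad multi-function tagging modes, store the tag
+ * ethertype in the runtime init array so the PRS, NIG, PBF and DORQ
+ * blocks are all programmed with the same value during init.
+ */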
+ if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits) ||
+ OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+ &p_dev->mf_bits))) {
+ if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits))
+ ether_type = ETHER_TYPE_VLAN;
+ else
+ ether_type = ETHER_TYPE_QINQ;
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+ ether_type);
+ }
+
ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms);
rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
rc2 = ECORE_UNKNOWN_ERROR;
}
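+ /* Wait for the DPC handler to complete, so that any in-flight
+ * slowpath events are fully processed before tear-down continues.
+ */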
+ OSAL_DPC_SYNC(p_hwfn);
+
+ /* After this point we don't expect the FW to send us async
+ * events
+ */
+
/* perform debug action after PF stop was sent */
OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
}
- if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
- feat_num[ECORE_FCOE_CQ] =
- OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
- if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
- feat_num[ECORE_ISCSI_CQ] =
- OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
+ if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
+ ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) {
+ u32 *p_storage_feat = ECORE_IS_FCOE_PERSONALITY(p_hwfn) ?
+ &feat_num[ECORE_FCOE_CQ] :
+ &feat_num[ECORE_ISCSI_CQ];
+ u32 limit = sb_cnt.cnt;
+
+ /* The number of queues should not exceed the number of FP SBs.
+ * In a storage target, the queues are divided into pairs of a CQ
+ * and a CmdQ, and each pair uses a single SB. The limit in this
+ * case should allow a max ratio of 2:1 instead of 1:1.
+ */
+ if (p_hwfn->p_dev->b_is_target)
+ limit *= 2;
+ *p_storage_feat = OSAL_MIN_T(u32, limit,
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+
+ /* @DPDK */
+ /* The size of "cq_cmdq_sb_num_arr" in the fcoe/iscsi init
+ * ramrod is limited to "NUM_OF_GLOBAL_QUEUES / 2".
+ */
+ *p_storage_feat = OSAL_MIN_T(u32, *p_storage_feat,
+ (NUM_OF_GLOBAL_QUEUES / 2));
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
"#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+
+ /* Workaround for an MFW issue where PF FLR does not clean up
+ * the IGU block
+ */
+ if (!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP))
+ ecore_pf_flr_igu_cleanup(p_hwfn);
}
/* Check if mdump logs/data are present and update the epoch value */
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
p_dev->allow_mdump = p_params->allow_mdump;
p_hwfn->b_en_pacing = p_params->b_en_pacing;
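+ /* Storage-target mode; later relaxes the FCOE/ISCSI CQ limit to a
+ * 2:1 CQ/CmdQ-to-SB ratio (see the feat_num computation above).
+ */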
+ p_dev->b_is_target = p_params->b_is_target;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;