X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fbase%2Fecore_dev.c;h=da9cdc9a430b5cdc3a9d05ae055229fc350a73c1;hb=191447ce23765dc22a21b5cfe02c7d1a7a776258;hp=b530173341f4bba5253816b25f6ceaa86a5ea7b5;hpb=301ea2d7147c4c837cd3a88bd734b59c3764623c;p=dpdk.git diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c index b530173341..da9cdc9a43 100644 --- a/drivers/net/qede/base/ecore_dev.c +++ b/drivers/net/qede/base/ecore_dev.c @@ -70,28 +70,26 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id) } val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); + if (val) + return 1 << (val + 15); /* The above registers were updated in the past only in CMT mode. Since * they were found to be useful MFW started updating them from 8.7.7.0. * In older MFW versions they are set to 0 which means disabled. */ - if (!val) { - if (p_hwfn->p_dev->num_hwfns > 1) { - DP_NOTICE(p_hwfn, false, - "BAR size not configured. Assuming BAR size"); - DP_NOTICE(p_hwfn, false, - "of 256kB for GRC and 512kB for DB\n"); - return BAR_ID_0 ? 256 * 1024 : 512 * 1024; - } else { - DP_NOTICE(p_hwfn, false, - "BAR size not configured. Assuming BAR size"); - DP_NOTICE(p_hwfn, false, - "of 512kB for GRC and 512kB for DB\n"); - return 512 * 1024; - } + if (p_hwfn->p_dev->num_hwfns > 1) { + DP_NOTICE(p_hwfn, false, + "BAR size not configured. Assuming BAR size of 256kB" + " for GRC and 512kB for DB\n"); + val = BAR_ID_0 ? 256 * 1024 : 512 * 1024; + } else { + DP_NOTICE(p_hwfn, false, + "BAR size not configured. Assuming BAR size of 512kB" + " for GRC and 512kB for DB\n"); + val = 512 * 1024; } - return 1 << (val + 15); + return val; } void ecore_init_dp(struct ecore_dev *p_dev, @@ -138,13 +136,9 @@ static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) struct ecore_qm_info *qm_info = &p_hwfn->qm_info; OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params); - qm_info->qm_pq_params = OSAL_NULL; OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params); - qm_info->qm_vport_params = OSAL_NULL; OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params); - qm_info->qm_port_params = OSAL_NULL; OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data); - qm_info->wfq_data = OSAL_NULL; } void ecore_resc_free(struct ecore_dev *p_dev) @@ -155,7 +149,6 @@ void ecore_resc_free(struct ecore_dev *p_dev) return; OSAL_FREE(p_dev, p_dev->fw_data); - p_dev->fw_data = OSAL_NULL; OSAL_FREE(p_dev, p_dev->reset_stats); @@ -163,9 +156,7 @@ void ecore_resc_free(struct ecore_dev *p_dev) struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; OSAL_FREE(p_dev, p_hwfn->p_tx_cids); - p_hwfn->p_tx_cids = OSAL_NULL; OSAL_FREE(p_dev, p_hwfn->p_rx_cids); - p_hwfn->p_rx_cids = OSAL_NULL; } for_each_hwfn(p_dev, i) { @@ -542,11 +533,14 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) /* Allocate Memory for the Queue->CID mapping */ for_each_hwfn(p_dev, i) { struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + u32 num_tx_conns = RESC_NUM(p_hwfn, ECORE_L2_QUEUE); + int tx_size, rx_size; /* @@@TMP - resc management, change to actual required size */ - int tx_size = sizeof(struct ecore_hw_cid_data) * - RESC_NUM(p_hwfn, ECORE_L2_QUEUE); - int rx_size = sizeof(struct ecore_hw_cid_data) * + if (p_hwfn->pf_params.eth_pf_params.num_cons > num_tx_conns) + num_tx_conns = p_hwfn->pf_params.eth_pf_params.num_cons; + tx_size = sizeof(struct ecore_hw_cid_data) * num_tx_conns; + rx_size = sizeof(struct ecore_hw_cid_data) * RESC_NUM(p_hwfn, ECORE_L2_QUEUE); p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, @@ -647,19 +641,20 @@ 
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) ecore_cxt_get_proto_cid_count( p_hwfn, PROTOCOLID_ROCE, - 0); + OSAL_NULL); num_cons *= 2; } else { num_cons = ecore_cxt_get_proto_cid_count( p_hwfn, PROTOCOLID_IWARP, - 0); + OSAL_NULL); } n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { num_cons = ecore_cxt_get_proto_cid_count(p_hwfn, - PROTOCOLID_ISCSI, 0); + PROTOCOLID_ISCSI, + OSAL_NULL); n_eqes += 2 * num_cons; } @@ -667,7 +662,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements." "The maximum of a u16 chain is 0x%x\n", n_eqes, 0xFFFF); - goto alloc_err; + goto alloc_no_mem; } p_eq = ecore_eq_alloc(p_hwfn, (u16)n_eqes); @@ -707,7 +702,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) } p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, - sizeof(struct ecore_eth_stats)); + sizeof(*p_dev->reset_stats)); if (!p_dev->reset_stats) { DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n"); goto alloc_no_mem; @@ -794,10 +789,9 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Sending final cleanup for PFVF[%d] [Command %08x\n]", - id, OSAL_CPU_TO_LE32(command)); + id, command); - ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, - OSAL_CPU_TO_LE32(command)); + ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); /* Poll until completion */ while (!REG_RD(p_hwfn, addr) && count--) @@ -819,9 +813,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) { int hw_mode = 0; - if (ECORE_IS_BB_A0(p_hwfn->p_dev)) { - hw_mode |= 1 << MODE_BB_A0; - } else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) { + if (ECORE_IS_BB_B0(p_hwfn->p_dev)) { hw_mode |= 1 << MODE_BB_B0; } else if (ECORE_IS_AH(p_hwfn->p_dev)) { hw_mode |= 1 << MODE_K2; @@ -877,11 +869,6 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) #endif hw_mode |= 1 << MODE_ASIC; -#ifndef REAL_ASIC_ONLY - if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) - hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND; -#endif - if (p_hwfn->p_dev->num_hwfns > 1) hw_mode |= 1 << MODE_100G; @@ -987,7 +974,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, ecore_gtt_init(p_hwfn); #ifndef ASIC_ONLY - if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { + if (CHIP_REV_IS_EMUL(p_dev)) { rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt); if (rc != ECORE_SUCCESS) return rc; @@ -1002,7 +989,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, } ecore_qm_common_rt_init(p_hwfn, - p_hwfn->p_dev->num_ports_in_engines, + p_dev->num_ports_in_engines, qm_info->max_phys_tcs_per_port, qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en, qm_info->vport_wfq_en, @@ -1032,11 +1019,11 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); - if (ECORE_IS_BB(p_hwfn->p_dev)) { + if (ECORE_IS_BB(p_dev)) { /* Workaround clears ROCE search for all functions to prevent * involving non initialized function in processing ROCE packet. 
*/ - num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev); + num_pfs = NUM_OF_ENG_PFS(p_dev); for (pf_id = 0; pf_id < num_pfs; pf_id++) { ecore_fid_pretend(p_hwfn, p_ptt, pf_id); ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); @@ -1052,8 +1039,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, * This is not done inside the init tool since it currently can't * perform a pretending to VFs. */ - max_num_vfs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_VFS_K2 - : MAX_NUM_VFS_BB; + max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id); ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); @@ -1532,7 +1518,9 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, return rc; if (b_hw_start) { /* enable interrupts */ - ecore_int_igu_enable(p_hwfn, p_ptt, int_mode); + rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode); + if (rc != ECORE_SUCCESS) + return rc; /* send function start command */ rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode, @@ -1623,9 +1611,10 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, { enum _ecore_status_t rc, mfw_rc; u32 load_code, param; - int i, j; + int i; - if (p_params->int_mode == ECORE_INT_MODE_MSI && p_dev->num_hwfns > 1) { + if ((p_params->int_mode == ECORE_INT_MODE_MSI) && + (p_dev->num_hwfns > 1)) { DP_NOTICE(p_dev, false, "MSI mode is not supported for CMT devices\n"); return ECORE_INVAL; @@ -1707,25 +1696,6 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, p_hwfn->hw_info.hw_mode); if (rc) break; - -#ifndef REAL_ASIC_ONLY - if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) { - struct init_nig_pri_tc_map_req tc_map; - - OSAL_MEM_ZERO(&tc_map, sizeof(tc_map)); - - /* remove this once flow control is - * implemented - */ - for (j = 0; j < NUM_OF_VLAN_PRIORITIES; j++) { - tc_map.pri[j].tc_id = 0; - tc_map.pri[j].valid = 1; - } - ecore_init_nig_pri_tc_map(p_hwfn, - p_hwfn->p_main_ptt, - &tc_map); - } -#endif /* Fall into */ case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, @@ -1757,10 +1727,6 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, return mfw_rc; } - ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt); - ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt, - p_params->epoch); - /* send DCBX attention request command */ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "sending phony dcbx set command to trigger DCBx attention handling\n"); @@ -1802,13 +1768,14 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev, */ OSAL_MSLEEP(1); } - if (i == ECORE_HW_STOP_RETRY_LIMIT) - DP_NOTICE(p_hwfn, true, - "Timers linear scans are not over [Connection %02x Tasks %02x]\n", - (u8)ecore_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_CONN), - (u8)ecore_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_TASK)); + + if (i < ECORE_HW_STOP_RETRY_LIMIT) + return; + + DP_NOTICE(p_hwfn, true, "Timers linear scans are not over" + " [Connection %02x Tasks %02x]\n", + (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), + (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); } void ecore_hw_timers_stop_all(struct ecore_dev *p_dev) @@ -2090,6 +2057,7 @@ static void get_function_id(struct ecore_hwfn *p_hwfn) static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn) { u32 *feat_num = p_hwfn->hw_info.feat_num; + struct ecore_sb_cnt_info sb_cnt_info; int num_features = 1; /* L2 Queues require each: 1 status block. 
1 L2 queue */ @@ -2098,11 +2066,21 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn) RESC_NUM(p_hwfn, ECORE_SB) / num_features, RESC_NUM(p_hwfn, ECORE_L2_QUEUE)); + OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info)); + ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info); + feat_num[ECORE_VF_L2_QUE] = + OSAL_MIN_T(u32, + RESC_NUM(p_hwfn, ECORE_L2_QUEUE) - + FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE), + sb_cnt_info.sb_iov_cnt); + DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, - "#PF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n", - feat_num[ECORE_PF_L2_QUE], - feat_num[ECORE_RDMA_CNQ], - RESC_NUM(p_hwfn, ECORE_SB), num_features); + "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n", + (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE), + (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE), + (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ), + RESC_NUM(p_hwfn, ECORE_SB), + num_features); } static enum resource_id_enum @@ -2218,6 +2196,40 @@ static u32 ecore_hw_get_dflt_resc_num(struct ecore_hwfn *p_hwfn, return dflt_resc_num; } +static const char *ecore_hw_get_resc_name(enum ecore_resources res_id) +{ + switch (res_id) { + case ECORE_SB: + return "SB"; + case ECORE_L2_QUEUE: + return "L2_QUEUE"; + case ECORE_VPORT: + return "VPORT"; + case ECORE_RSS_ENG: + return "RSS_ENG"; + case ECORE_PQ: + return "PQ"; + case ECORE_RL: + return "RL"; + case ECORE_MAC: + return "MAC"; + case ECORE_VLAN: + return "VLAN"; + case ECORE_RDMA_CNQ_RAM: + return "RDMA_CNQ_RAM"; + case ECORE_ILT: + return "ILT"; + case ECORE_LL2_QUEUE: + return "LL2_QUEUE"; + case ECORE_CMDQS_CQS: + return "CMDQS_CQS"; + case ECORE_RDMA_STATS_QUEUE: + return "RDMA_STATS_QUEUE"; + default: + return "UNKNOWN_RESOURCE"; + } +} + static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id, bool drv_resc_alloc) @@ -2232,8 +2244,9 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, dflt_resc_num = ecore_hw_get_dflt_resc_num(p_hwfn, res_id); if (!dflt_resc_num) { - DP_ERR(p_hwfn, "Failed to get default amount for resource %d\n", - res_id); + DP_ERR(p_hwfn, + "Failed to get default amount for resource %d [%s]\n", + res_id, ecore_hw_get_resc_name(res_id)); return ECORE_INVAL; } dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx; @@ -2259,8 +2272,9 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, &mcp_resp, &mcp_param); if (rc != ECORE_SUCCESS) { DP_NOTICE(p_hwfn, true, - "MFW resp failure for a resc alloc req [res_id %d]\n", - res_id); + "MFW response failure for an allocation request for" + " resource %d [%s]\n", + res_id, ecore_hw_get_resc_name(res_id)); return rc; } @@ -2273,11 +2287,11 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) { /* @DPDK */ DP_INFO(p_hwfn, - "No allocation info for resc %d [mcp_resp 0x%x].", - res_id, mcp_resp); - DP_INFO(p_hwfn, - "Applying default values [num %d, start %d].\n", - dflt_resc_num, dflt_resc_start); + "Resource %d [%s]: No allocation info was received" + " [mcp_resp 0x%x]. 
Applying default values" + " [num %d, start %d].\n", + res_id, ecore_hw_get_resc_name(res_id), mcp_resp, + dflt_resc_num, dflt_resc_start); *p_resc_num = dflt_resc_num; *p_resc_start = dflt_resc_start; @@ -2295,14 +2309,11 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, *p_resc_start = resc_info.offset; if (*p_resc_num != dflt_resc_num || *p_resc_start != dflt_resc_start) { - DP_NOTICE(p_hwfn, false, - "Resource %d: MFW allocation [num %d, start %d]", - res_id, *p_resc_num, *p_resc_start); - DP_NOTICE(p_hwfn, false, - "differs from default values [num %d, start %d]%s\n", - dflt_resc_num, - dflt_resc_start, - drv_resc_alloc ? " - applying default values" : ""); + DP_INFO(p_hwfn, + "Resource %d [%s]: MFW allocation [num %d, start %d] differs from default values [num %d, start %d]%s\n", + res_id, ecore_hw_get_resc_name(res_id), *p_resc_num, + *p_resc_start, dflt_resc_num, dflt_resc_start, + drv_resc_alloc ? " - Applying default values" : ""); if (drv_resc_alloc) { *p_resc_num = dflt_resc_num; *p_resc_start = dflt_resc_start; @@ -2312,40 +2323,6 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, return ECORE_SUCCESS; } -static const char *ecore_hw_get_resc_name(enum ecore_resources res_id) -{ - switch (res_id) { - case ECORE_SB: - return "SB"; - case ECORE_L2_QUEUE: - return "L2_QUEUE"; - case ECORE_VPORT: - return "VPORT"; - case ECORE_RSS_ENG: - return "RSS_ENG"; - case ECORE_PQ: - return "PQ"; - case ECORE_RL: - return "RL"; - case ECORE_MAC: - return "MAC"; - case ECORE_VLAN: - return "VLAN"; - case ECORE_RDMA_CNQ_RAM: - return "RDMA_CNQ_RAM"; - case ECORE_ILT: - return "ILT"; - case ECORE_LL2_QUEUE: - return "LL2_QUEUE"; - case ECORE_CMDQS_CQS: - return "CMDQS_CQS"; - case ECORE_RDMA_STATS_QUEUE: - return "RDMA_STATS_QUEUE"; - default: - return "UNKNOWN_RESOURCE"; - } -} - static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn, bool drv_resc_alloc) { @@ -2425,7 +2402,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn, static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { - u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; + u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode; u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; struct ecore_mcp_link_params *link; @@ -2474,6 +2451,9 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G; break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G; + break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G; break; @@ -2486,6 +2466,28 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, break; } + /* Read DCBX configuration */ + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + dcbx_mode = ecore_rd(p_hwfn, p_ptt, + port_cfg_addr + + OFFSETOF(struct nvm_cfg1_port, generic_cont0)); + dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK) + >> NVM_CFG1_PORT_DCBX_MODE_OFFSET; + switch (dcbx_mode) { + case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC: + p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC; + break; + case NVM_CFG1_PORT_DCBX_MODE_CEE: + p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE; + break; + case NVM_CFG1_PORT_DCBX_MODE_IEEE: + 
p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE; + break; + default: + p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED; + } + /* Read default link configuration */ link = &p_hwfn->mcp_info->link_input; port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + @@ -2751,7 +2753,7 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, /* Since all information is common, only first hwfns should do this */ if (IS_LEAD_HWFN(p_hwfn)) { rc = ecore_iov_hw_info(p_hwfn); - if (rc) + if (rc != ECORE_SUCCESS) return rc; } @@ -2765,12 +2767,17 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, ecore_hw_info_port_num(p_hwfn, p_ptt); #ifndef ASIC_ONLY - if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) + if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) { +#endif + rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) + return rc; +#ifndef ASIC_ONLY + } #endif - ecore_hw_get_nvm_info(p_hwfn, p_ptt); rc = ecore_int_igu_read_cam(p_hwfn, p_ptt); - if (rc) + if (rc != ECORE_SUCCESS) return rc; #ifndef ASIC_ONLY @@ -2795,11 +2802,14 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, ecore_mcp_cmd_port_init(p_hwfn, p_ptt); } - if (personality != ECORE_PCI_DEFAULT) + if (personality != ECORE_PCI_DEFAULT) { p_hwfn->hw_info.personality = personality; - else if (ecore_mcp_is_init(p_hwfn)) - p_hwfn->hw_info.personality = - p_hwfn->mcp_info->func_info.protocol; + } else if (ecore_mcp_is_init(p_hwfn)) { + enum ecore_pci_personality protocol; + + protocol = p_hwfn->mcp_info->func_info.protocol; + p_hwfn->hw_info.personality = protocol; + } #ifndef ASIC_ONLY /* To overcome ILT lack for emulation, until at least until we'll have @@ -2892,9 +2902,9 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev) MISCS_REG_CHIP_METAL); MASK_FIELD(CHIP_METAL, p_dev->chip_metal); DP_INFO(p_dev->hwfns, - "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", + "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", ECORE_IS_BB(p_dev) ? "BB" : "AH", - CHIP_REV_IS_A0(p_dev) ? 0 : 1, + 'A' + p_dev->chip_rev, (int)p_dev->chip_metal, p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, p_dev->chip_metal); @@ -2948,17 +2958,20 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev) #endif static enum _ecore_status_t -ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview, - void OSAL_IOMEM *p_doorbells, +ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, + void OSAL_IOMEM * p_regview, + void OSAL_IOMEM * p_doorbells, struct ecore_hw_prepare_params *p_params) { + struct ecore_dev *p_dev = p_hwfn->p_dev; + struct ecore_mdump_info mdump_info; enum _ecore_status_t rc = ECORE_SUCCESS; /* Split PCI bars evenly between hwfns */ p_hwfn->regview = p_regview; p_hwfn->doorbells = p_doorbells; - if (IS_VF(p_hwfn->p_dev)) + if (IS_VF(p_dev)) return ecore_vf_hw_prepare(p_hwfn); /* Validate that chip access is feasible */ @@ -2982,7 +2995,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview, /* First hwfn learns basic information, e.g., number of hwfns */ if (!p_hwfn->my_id) { - rc = ecore_get_dev_info(p_hwfn->p_dev); + rc = ecore_get_dev_info(p_dev); if (rc != ECORE_SUCCESS) goto err1; } @@ -3004,6 +3017,29 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview, goto err2; } + /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is + * called, since among others it sets the ports number in an engine. 
+ */ + if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) && + !p_dev->recov_in_prog) { + rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n"); + } + + /* Check if mdump logs are present and update the epoch value */ + if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) { + rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt, + &mdump_info); + if (rc == ECORE_SUCCESS && mdump_info.num_of_logs > 0) { + DP_NOTICE(p_hwfn, false, + "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n"); + } + + ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt, + p_params->epoch); + } + /* Allocate the init RT array and initialize the init-ops engine */ rc = ecore_init_alloc(p_hwfn); if (rc) { @@ -3011,7 +3047,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview, goto err2; } #ifndef ASIC_ONLY - if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + if (CHIP_REV_IS_FPGA(p_dev)) { DP_NOTICE(p_hwfn, false, "FPGA: workaround; Prevent DMAE parities\n"); ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK, 7); @@ -3026,7 +3062,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview, return rc; err2: if (IS_LEAD_HWFN(p_hwfn)) - ecore_iov_free_hw_info(p_hwfn->p_dev); + ecore_iov_free_hw_info(p_dev); ecore_mcp_free(p_hwfn); err1: ecore_hw_hwfn_free(p_hwfn); @@ -3089,7 +3125,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, } } - return ECORE_SUCCESS; + return rc; } void ecore_hw_remove(struct ecore_dev *p_dev) @@ -3178,8 +3214,10 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev, } pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; - OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table, - p_chain->pbl.p_phys_table, pbl_size); + + if (!p_chain->pbl.external) + OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table, + p_chain->pbl.p_phys_table, pbl_size); out: OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl); } @@ -3264,8 +3302,8 @@ ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) static enum _ecore_status_t ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) { - void *p_virt = OSAL_NULL; dma_addr_t p_phys = 0; + void *p_virt = OSAL_NULL; p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); if (!p_virt) { @@ -3279,8 +3317,10 @@ ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) return ECORE_SUCCESS; } -static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev, - struct ecore_chain *p_chain) +static enum _ecore_status_t +ecore_chain_alloc_pbl(struct ecore_dev *p_dev, + struct ecore_chain *p_chain, + struct ecore_chain_ext_pbl *ext_pbl) { void *p_virt = OSAL_NULL; u8 *p_pbl_virt = OSAL_NULL; @@ -3304,7 +3344,15 @@ static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev, * should be saved to allow its freeing during the error flow. 
*/ size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; - p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size); + + if (ext_pbl == OSAL_NULL) { + p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size); + } else { + p_pbl_virt = ext_pbl->p_pbl_virt; + p_pbl_phys = ext_pbl->p_pbl_phys; + p_chain->pbl.external = true; + } + ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_virt_addr_tbl); if (!p_pbl_virt) { @@ -3342,7 +3390,8 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, enum ecore_chain_mode mode, enum ecore_chain_cnt_type cnt_type, u32 num_elems, osal_size_t elem_size, - struct ecore_chain *p_chain) + struct ecore_chain *p_chain, + struct ecore_chain_ext_pbl *ext_pbl) { u32 page_cnt; enum _ecore_status_t rc = ECORE_SUCCESS; @@ -3373,7 +3422,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, rc = ecore_chain_alloc_single(p_dev, p_chain); break; case ECORE_CHAIN_MODE_PBL: - rc = ecore_chain_alloc_pbl(p_dev, p_chain); + rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl); break; } if (rc) @@ -3768,8 +3817,8 @@ static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn, return ECORE_INVAL; } - OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size); p_coal_timeset = p_eth_qzone; + OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size); SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); @@ -3782,10 +3831,10 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn, u16 coalesce, u8 qid, u16 sb_id) { struct ustorm_eth_queue_zone eth_qzone; + u8 timeset, timer_res; u16 fw_qid = 0; u32 address; enum _ecore_status_t rc; - u8 timeset, timer_res; /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ if (coalesce <= 0x7F) { @@ -3825,10 +3874,10 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, u16 coalesce, u8 qid, u16 sb_id) { struct xstorm_eth_queue_zone eth_qzone; + u8 timeset, timer_res; u16 fw_qid = 0; u32 address; enum _ecore_status_t rc; - u8 timeset, timer_res; /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */ if (coalesce <= 0x7F) { @@ -4285,3 +4334,16 @@ int ecore_device_num_ports(struct ecore_dev *p_dev) return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev); } + +void ecore_set_fw_mac_addr(__le16 *fw_msb, + __le16 *fw_mid, + __le16 *fw_lsb, + u8 *mac) +{ + ((u8 *)fw_msb)[0] = mac[1]; + ((u8 *)fw_msb)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lsb)[0] = mac[5]; + ((u8 *)fw_lsb)[1] = mac[4]; +}
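
The final hunk above adds ecore_set_fw_mac_addr(), which packs a 6-byte MAC address into three 16-bit firmware words, swapping the bytes within each pair. A minimal standalone sketch of that byte layout follows (separate from the patch itself); the driver's __le16 type is stood in by a plain uint16_t here, which is an assumption made only for illustration.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the driver's __le16 typedef; assumed 16-bit for illustration. */
typedef uint16_t le16_t;

/* Mirrors the byte swaps done by ecore_set_fw_mac_addr() in the patch:
 * each 16-bit word receives two MAC bytes in reversed order.
 */
static void set_fw_mac_addr(le16_t *fw_msb, le16_t *fw_mid, le16_t *fw_lsb,
			    const uint8_t *mac)
{
	((uint8_t *)fw_msb)[0] = mac[1];
	((uint8_t *)fw_msb)[1] = mac[0];
	((uint8_t *)fw_mid)[0] = mac[3];
	((uint8_t *)fw_mid)[1] = mac[2];
	((uint8_t *)fw_lsb)[0] = mac[5];
	((uint8_t *)fw_lsb)[1] = mac[4];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	le16_t msb = 0, mid = 0, lsb = 0;

	set_fw_mac_addr(&msb, &mid, &lsb, mac);
	/* On a little-endian host this prints "0011 2233 4455". */
	printf("%04x %04x %04x\n", msb, mid, lsb);
	return 0;
}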