X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fbase%2Fice_common.c;h=1683daf28dbecfb353443e14c74ba17929cc1d28;hb=c47d6e83334e656f85e4bb6881cf63da38276b0a;hp=e1181a1028777c7e5ac7a9a6cd980c5e74da190a;hpb=6dfe33fbe5fbbac695af4c495157222ad73ca800;p=dpdk.git

diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index e1181a1028..1683daf28d 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -43,6 +43,11 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 	case ICE_DEV_ID_E822L_BACKPLANE:
 	case ICE_DEV_ID_E822L_SFP:
 	case ICE_DEV_ID_E822L_SGMII:
+	case ICE_DEV_ID_E823L_10G_BASE_T:
+	case ICE_DEV_ID_E823L_1GBE:
+	case ICE_DEV_ID_E823L_BACKPLANE:
+	case ICE_DEV_ID_E823L_QSFP:
+	case ICE_DEV_ID_E823L_SFP:
 		hw->mac_type = ICE_MAC_GENERIC;
 		break;
 	default:
@@ -192,6 +197,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
 	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
 		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
 		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
+		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
+			   sizeof(pi->phy.link_info.module_type),
+			   ICE_NONDMA_TO_NONDMA);
+
 	}
 
 	return status;
@@ -264,6 +273,18 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
 		return ICE_MEDIA_UNKNOWN;
 
 	if (hw_link_info->phy_type_low) {
+		/* 1G SGMII is a special case where some DA cable PHYs
+		 * may show this as an option when it really shouldn't
+		 * be since SGMII is meant to be between a MAC and a PHY
+		 * in a backplane. Try to detect this case and handle it
+		 */
+		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
+		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
+		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
+		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+			return ICE_MEDIA_DA;
+
 		switch (hw_link_info->phy_type_low) {
 		case ICE_PHY_TYPE_LOW_1000BASE_SX:
 		case ICE_PHY_TYPE_LOW_1000BASE_LX:
@@ -536,6 +557,7 @@ enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
 		return ICE_ERR_NO_MEMORY;
 
 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
+	sw->prof_res_bm_init = 0;
 
 	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
 	if (status) {
@@ -860,23 +882,23 @@ void ice_deinit_hw(struct ice_hw *hw)
  */
 enum ice_status ice_check_reset(struct ice_hw *hw)
 {
-	u32 cnt, reg = 0, grst_delay, uld_mask;
+	u32 cnt, reg = 0, grst_timeout, uld_mask;
 
 	/* Poll for Device Active state in case a recent CORER, GLOBR,
 	 * or EMPR has occurred. The grst delay value is in 100ms units.
 	 * Add 1sec for outstanding AQ commands that can take a long time.
 	 */
-	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
-		      GLGEN_RSTCTL_GRSTDEL_S) + 10;
+	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
+			GLGEN_RSTCTL_GRSTDEL_S) + 10;
 
-	for (cnt = 0; cnt < grst_delay; cnt++) {
+	for (cnt = 0; cnt < grst_timeout; cnt++) {
 		ice_msec_delay(100, true);
 		reg = rd32(hw, GLGEN_RSTAT);
 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
 			break;
 	}
 
-	if (cnt == grst_delay) {
+	if (cnt == grst_timeout) {
 		ice_debug(hw, ICE_DBG_INIT,
 			  "Global reset polling failed to complete.\n");
 		return ICE_ERR_RESET_FAILED;
@@ -1828,10 +1850,16 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 	if (opc == ice_aqc_opc_list_dev_caps) {
 		dev_p = &hw->dev_caps;
 		caps = &dev_p->common_cap;
+
+		ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
+
 		prefix = "dev cap";
 	} else if (opc == ice_aqc_opc_list_func_caps) {
 		func_p = &hw->func_caps;
 		caps = &func_p->common_cap;
+
+		ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
+
 		prefix = "func cap";
 	} else {
 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
@@ -2026,40 +2054,21 @@ ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
 {
 	enum ice_status status;
 	u32 cap_count;
-	u16 cbuf_len;
-	u8 retries;
-
-	/* The driver doesn't know how many capabilities the device will return
-	 * so the buffer size required isn't known ahead of time. The driver
-	 * starts with cbuf_len and if this turns out to be insufficient, the
-	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
-	 * The driver then allocates the buffer based on the count and retries
-	 * the operation. So it follows that the retry count is 2.
-	 */
-#define ICE_GET_CAP_BUF_COUNT 40
-#define ICE_GET_CAP_RETRY_COUNT 2
-
-	cap_count = ICE_GET_CAP_BUF_COUNT;
-	retries = ICE_GET_CAP_RETRY_COUNT;
-
-	do {
-		void *cbuf;
-
-		cbuf_len = (u16)(cap_count *
-				 sizeof(struct ice_aqc_list_caps_elem));
-		cbuf = ice_malloc(hw, cbuf_len);
-		if (!cbuf)
-			return ICE_ERR_NO_MEMORY;
+	void *cbuf;
 
-		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
-					      opc, NULL);
-		ice_free(hw, cbuf);
+	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
+	if (!cbuf)
+		return ICE_ERR_NO_MEMORY;
 
-		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
-			break;
+	/* Although the driver doesn't know the number of capabilities the
+	 * device will return, we can simply send a 4KB buffer, the maximum
+	 * possible size that firmware can return.
+	 */
+	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
 
-		/* If ENOMEM is returned, try again with bigger buffer */
-	} while (--retries);
+	status = ice_aq_discover_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
+				      opc, NULL);
+	ice_free(hw, cbuf);
 
 	return status;
 }
@@ -2462,10 +2471,6 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
 
 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
 					     pcaps, NULL);
-		if (status == ICE_SUCCESS)
-			ice_memcpy(li->module_type, &pcaps->module_type,
-				   sizeof(li->module_type),
-				   ICE_NONDMA_TO_NONDMA);
 
 		ice_free(hw, pcaps);
 	}
@@ -2631,6 +2636,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
 	if (!pi || !aq_failures)
 		return ICE_ERR_BAD_PTR;
 
+	*aq_failures = 0;
 	hw = pi->hw;
 
 	pcaps = (struct ice_aqc_get_phy_caps_data *)
@@ -4005,7 +4011,18 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
 	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
 	 */
-	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
+	buf->txqs[0].info.valid_sections =
+		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
+		ICE_AQC_ELEM_VALID_EIR;
+	buf->txqs[0].info.generic = 0;
+	buf->txqs[0].info.cir_bw.bw_profile_idx =
+		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
+	buf->txqs[0].info.cir_bw.bw_alloc =
+		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
+	buf->txqs[0].info.eir_bw.bw_profile_idx =
+		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
+	buf->txqs[0].info.eir_bw.bw_alloc =
+		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
 
 	/* add the LAN queue */
 	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
@@ -4527,3 +4544,18 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
 
 	return status;
 }
+
+/**
+ * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
+ * @caps: get PHY capability data
+ */
+bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
+{
+	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
+	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
+				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
+				       ICE_AQC_PHY_AN_EN_CLAUSE37))
+		return true;
+
+	return false;
+}
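
Note on the simplified ice_discover_caps() above: with one full-size buffer the old ENOMEM-and-retry loop becomes unnecessary. A rough, self-contained sketch of the sizing arithmetic follows; the 4096-byte value matches the "4KB" mentioned in the new comment, while the 32-byte element size is an assumption about struct ice_aqc_list_caps_elem and is not stated in the patch.

#include <stdio.h>

/* Illustrative only, not part of the patch. EXAMPLE_AQ_MAX_BUF_LEN stands in
 * for ICE_AQ_MAX_BUF_LEN (4KB per the new comment); EXAMPLE_CAPS_ELEM_SIZE is
 * an assumed sizeof(struct ice_aqc_list_caps_elem).
 */
#define EXAMPLE_AQ_MAX_BUF_LEN	4096u
#define EXAMPLE_CAPS_ELEM_SIZE	32u

int main(void)
{
	/* cap_count handed to ice_aq_discover_caps() in the new code path */
	unsigned int cap_count = EXAMPLE_AQ_MAX_BUF_LEN / EXAMPLE_CAPS_ELEM_SIZE;

	printf("one %u-byte buffer holds up to %u capability records\n",
	       EXAMPLE_AQ_MAX_BUF_LEN, cap_count); /* 4096 / 32 = 128 */
	return 0;
}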
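
The new ice_is_phy_caps_an_enabled() helper at the end of the patch folds the AN mode capability bit and the clause 28/73/37 autoneg enables into a single predicate. Below is a minimal usage sketch, reusing only calls that appear elsewhere in this patch; the wrapper name ice_port_autoneg_enabled and its placement are hypothetical, not part of the driver.

/* Hypothetical illustration: query the port's PHY capabilities and report
 * whether any form of autonegotiation is enabled, following the same
 * ice_aq_get_phy_caps() call pattern seen in ice_update_link_info() above.
 */
static bool ice_port_autoneg_enabled(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	bool an_enabled = false;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(pi->hw, sizeof(*pcaps));
	if (!pcaps)
		return false;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
				     pcaps, NULL);
	if (status == ICE_SUCCESS)
		an_enabled = ice_is_phy_caps_an_enabled(pcaps);

	ice_free(pi->hw, pcaps);
	return an_enabled;
}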