X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fbase%2Fice_common.c;h=e266ba56c3f80374beb40c308df06f7c04ec42ed;hb=4003b3d7af6d4c81be7f7b01d9b922d1cd79fddd;hp=0356537a414bd1950b0cdd5dd950a702576f6a40;hpb=5d0b7b5fc4912cfbcaa4fd2e7faa796dc3e445f7;p=dpdk.git diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c index 0356537a41..e266ba56c3 100644 --- a/drivers/net/ice/base/ice_common.c +++ b/drivers/net/ice/base/ice_common.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2001-2019 + * Copyright(c) 2001-2020 Intel Corporation */ #include "ice_common.h" @@ -9,37 +9,7 @@ #include "ice_flow.h" #include "ice_switch.h" -#define ICE_PF_RESET_WAIT_COUNT 200 - -#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \ - wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \ - ((ICE_RX_OPC_MDID << \ - GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \ - GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \ - (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \ - GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M)) - -#define ICE_PROG_FLEX_ENTRY_EXTRACT(hw, rxdid, protid, off, idx) \ - wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \ - ((ICE_RX_OPC_EXTRACT << \ - GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \ - GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \ - (((protid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \ - GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M) | \ - (((off) << GLFLXP_RXDID_FLX_WRD_##idx##_EXTRACTION_OFFSET_S) & \ - GLFLXP_RXDID_FLX_WRD_##idx##_EXTRACTION_OFFSET_M)) - -#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \ - wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \ - (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \ - GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \ - (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \ - GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \ - (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \ - GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \ - (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \ - GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M)) - +#define ICE_PF_RESET_WAIT_COUNT 300 /** * ice_set_mac_type - Sets MAC type @@ -50,27 +20,40 @@ */ static enum ice_status ice_set_mac_type(struct ice_hw *hw) { - enum ice_status status = ICE_SUCCESS; - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - if (hw->vendor_id == ICE_INTEL_VENDOR_ID) { - switch (hw->device_id) { - default: - hw->mac_type = ICE_MAC_GENERIC; - break; - } - } else { - status = ICE_ERR_DEVICE_NOT_SUPPORTED; + if (hw->vendor_id != ICE_INTEL_VENDOR_ID) + return ICE_ERR_DEVICE_NOT_SUPPORTED; + + switch (hw->device_id) { + case ICE_DEV_ID_E810C_BACKPLANE: + case ICE_DEV_ID_E810C_QSFP: + case ICE_DEV_ID_E810C_SFP: + case ICE_DEV_ID_E810_XXV_BACKPLANE: + case ICE_DEV_ID_E810_XXV_QSFP: + case ICE_DEV_ID_E810_XXV_SFP: + hw->mac_type = ICE_MAC_E810; + break; + case ICE_DEV_ID_E822C_10G_BASE_T: + case ICE_DEV_ID_E822C_BACKPLANE: + case ICE_DEV_ID_E822C_QSFP: + case ICE_DEV_ID_E822C_SFP: + case ICE_DEV_ID_E822C_SGMII: + case ICE_DEV_ID_E822L_10G_BASE_T: + case ICE_DEV_ID_E822L_BACKPLANE: + case ICE_DEV_ID_E822L_SFP: + case ICE_DEV_ID_E822L_SGMII: + hw->mac_type = ICE_MAC_GENERIC; + break; + default: + hw->mac_type = ICE_MAC_UNKNOWN; + break; } - ice_debug(hw, ICE_DBG_INIT, "found mac_type: %d, status: %d\n", - hw->mac_type, status); - - return status; + ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type); + return ICE_SUCCESS; } - /** * ice_clear_pf_cfg - Clear PF configuration * @hw: pointer to the hardware structure @@ -142,7 +125,6 @@ 
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, ETH_ALEN, ICE_DMA_TO_NONDMA); break; } - return ICE_SUCCESS; } @@ -187,6 +169,56 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, return status; } +/** + * ice_aq_get_link_topo_handle - get link topology node return status + * @pi: port information structure + * @node_type: requested node type + * @cd: pointer to command details structure or NULL + * + * Get link topology node return status for specified node type (0x06E0) + * + * Node type cage can be used to determine if cage is present. If AQC + * returns error (ENOENT), then no cage present. If no cage present, then + * connection type is backplane or BASE-T. + */ +static enum ice_status +ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, + struct ice_sq_cd *cd) +{ + struct ice_aqc_get_link_topo *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.get_link_topo; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + + cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << + ICE_AQC_LINK_TOPO_NODE_CTX_S); + + /* set node type */ + cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); + + return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); +} + +/** + * ice_is_media_cage_present + * @pi: port information structure + * + * Returns true if media cage is present, else false. If no cage, then + * media type is backplane or BASE-T. + */ +static bool ice_is_media_cage_present(struct ice_port_info *pi) +{ + /* Node type cage can be used to determine if cage is present. If AQC + * returns error (ENOENT), then no cage present. If no cage present then + * connection type is backplane or BASE-T. + */ + return !ice_aq_get_link_topo_handle(pi, + ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE, + NULL); +} + /** * ice_get_media_type - Gets media type * @pi: port information structure @@ -212,7 +244,6 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_SR: case ICE_PHY_TYPE_LOW_25GBASE_LR: - case ICE_PHY_TYPE_LOW_25G_AUI_C2C: case ICE_PHY_TYPE_LOW_40GBASE_SR4: case ICE_PHY_TYPE_LOW_40GBASE_LR4: case ICE_PHY_TYPE_LOW_50GBASE_SR2: @@ -225,6 +256,15 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_LOW_100GBASE_SR2: case ICE_PHY_TYPE_LOW_100GBASE_DR: return ICE_MEDIA_FIBER; + case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: + case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: + return ICE_MEDIA_FIBER; case ICE_PHY_TYPE_LOW_100BASE_TX: case ICE_PHY_TYPE_LOW_1000BASE_T: case ICE_PHY_TYPE_LOW_2500BASE_T: @@ -243,6 +283,16 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: case ICE_PHY_TYPE_LOW_100GBASE_CP2: return ICE_MEDIA_DA; + case ICE_PHY_TYPE_LOW_25G_AUI_C2C: + case ICE_PHY_TYPE_LOW_40G_XLAUI: + case ICE_PHY_TYPE_LOW_50G_LAUI2: + case ICE_PHY_TYPE_LOW_50G_AUI2: + case ICE_PHY_TYPE_LOW_50G_AUI1: + case ICE_PHY_TYPE_LOW_100G_AUI4: + case ICE_PHY_TYPE_LOW_100G_CAUI4: + if (ice_is_media_cage_present(pi)) + return ICE_MEDIA_AUI; + /* fall-through */ case ICE_PHY_TYPE_LOW_1000BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_X: @@ -260,8 +310,16 @@ static enum 
ice_media_type ice_get_media_type(struct ice_port_info *pi) } } else { switch (hw_link_info->phy_type_high) { + case ICE_PHY_TYPE_HIGH_100G_AUI2: + case ICE_PHY_TYPE_HIGH_100G_CAUI2: + if (ice_is_media_cage_present(pi)) + return ICE_MEDIA_AUI; + /* fall-through */ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: return ICE_MEDIA_BACKPLANE; + case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: + case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: + return ICE_MEDIA_FIBER; } } return ICE_MEDIA_UNKNOWN; @@ -365,160 +423,40 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, } /** - * ice_init_flex_flags - * @hw: pointer to the hardware structure - * @prof_id: Rx Descriptor Builder profile ID + * ice_fill_tx_timer_and_fc_thresh + * @hw: pointer to the HW struct + * @cmd: pointer to MAC cfg structure * - * Function to initialize Rx flex flags + * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command + * descriptor */ -static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id) +static void +ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, + struct ice_aqc_set_mac_cfg *cmd) { - u8 idx = 0; + u16 fc_thres_val, tx_timer_val; + u32 val; - /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout: - * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE - * flexiflags1[3:0] - Not used for flag programming - * flexiflags2[7:0] - Tunnel and VLAN types - * 2 invalid fields in last index - */ - switch (prof_id) { - /* Rx flex flags are currently programmed for the NIC profiles only. - * Different flag bit programming configurations can be added per - * profile as needed. + /* We read back the transmit timer and fc threshold value of + * LFC. Thus, we will use index = + * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX. + * + * Also, because we are opearating on transmit timer and fc + * threshold of LFC, we don't turn on any bit in tx_tmr_priority */ - case ICE_RXDID_FLEX_NIC: - case ICE_RXDID_FLEX_NIC_2: - ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG, - ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI, - ICE_FLG_FIN, idx++); - /* flex flag 1 is not used for flexi-flag programming, skipping - * these four FLG64 bits. - */ - ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST, - ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++); - ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI, - ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100, - ICE_FLG_EVLAN_x9100, idx++); - ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100, - ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC, - ICE_FLG_TNL0, idx++); - ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2, - ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx); - break; - - default: - ice_debug(hw, ICE_DBG_INIT, - "Flag programming for profile ID %d not supported\n", - prof_id); - } -} - -/** - * ice_init_flex_flds - * @hw: pointer to the hardware structure - * @prof_id: Rx Descriptor Builder profile ID - * - * Function to initialize flex descriptors - */ -static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id) -{ - enum ice_prot_id protid_0, protid_1; - u16 offset_0, offset_1; - enum ice_flex_mdid mdid; - - switch (prof_id) { - case ICE_RXDID_FLEX_NIC: - case ICE_RXDID_FLEX_NIC_2: - ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_MDID_RX_HASH_LOW, 0); - ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_MDID_RX_HASH_HIGH, 1); - ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_MDID_FLOW_ID_LOWER, 2); - - mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ? 
- ICE_MDID_SRC_VSI : ICE_MDID_FLOW_ID_HIGH; - - ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3); +#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX - ice_init_flex_flags(hw, prof_id); - break; - case ICE_RXDID_COMMS_GENERIC: - case ICE_RXDID_COMMS_AUX_VLAN: - case ICE_RXDID_COMMS_AUX_IPV4: - case ICE_RXDID_COMMS_AUX_IPV6: - case ICE_RXDID_COMMS_AUX_IPV6_FLOW: - case ICE_RXDID_COMMS_AUX_TCP: - ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_MDID_RX_HASH_LOW, 0); - ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_MDID_RX_HASH_HIGH, 1); - ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_MDID_FLOW_ID_LOWER, 2); - ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_MDID_FLOW_ID_HIGH, 3); - - if (prof_id == ICE_RXDID_COMMS_AUX_VLAN) { - /* FlexiMD.4: VLAN1 - single or EVLAN (first for QinQ). - * FlexiMD.5: VLAN2 - C-VLAN (second for QinQ). - */ - protid_0 = ICE_PROT_EVLAN_O; - offset_0 = 0; - protid_1 = ICE_PROT_VLAN_O; - offset_1 = 0; - } else if (prof_id == ICE_RXDID_COMMS_AUX_IPV4) { - /* FlexiMD.4: IPHDR1 - IPv4 header word 4, "TTL" and - * "Protocol" fields. - * FlexiMD.5: IPHDR0 - IPv4 header word 0, "Ver", - * "Hdr Len" and "Type of Service" fields. - */ - protid_0 = ICE_PROT_IPV4_OF_OR_S; - offset_0 = 8; - protid_1 = ICE_PROT_IPV4_OF_OR_S; - offset_1 = 0; - } else if (prof_id == ICE_RXDID_COMMS_AUX_IPV6) { - /* FlexiMD.4: IPHDR1 - IPv6 header word 3, - * "Next Header" and "Hop Limit" fields. - * FlexiMD.5: IPHDR0 - IPv6 header word 0, - * "Ver", "Traffic class" and high 4 bits of - * "Flow Label" fields. - */ - protid_0 = ICE_PROT_IPV6_OF_OR_S; - offset_0 = 6; - protid_1 = ICE_PROT_IPV6_OF_OR_S; - offset_1 = 0; - } else if (prof_id == ICE_RXDID_COMMS_AUX_IPV6_FLOW) { - /* FlexiMD.4: IPHDR1 - IPv6 header word 1, - * 16 low bits of the "Flow Label" field. - * FlexiMD.5: IPHDR0 - IPv6 header word 0, - * "Ver", "Traffic class" and high 4 bits - * of "Flow Label" fields. - */ - protid_0 = ICE_PROT_IPV6_OF_OR_S; - offset_0 = 2; - protid_1 = ICE_PROT_IPV6_OF_OR_S; - offset_1 = 0; - } else if (prof_id == ICE_RXDID_COMMS_AUX_TCP) { - /* FlexiMD.4: TCPHDR - TCP header word 6, - * "Data Offset" and "Flags" fields. 
- * FlexiMD.5: Reserved - */ - protid_0 = ICE_PROT_TCP_IL; - offset_0 = 12; - protid_1 = ICE_PROT_ID_INVAL; - offset_1 = 0; - } else { - protid_0 = ICE_PROT_ID_INVAL; - offset_0 = 0; - protid_1 = ICE_PROT_ID_INVAL; - offset_1 = 0; - } + /* Retrieve the transmit timer */ + val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC)); + tx_timer_val = val & + PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M; + cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val); - ICE_PROG_FLEX_ENTRY_EXTRACT(hw, prof_id, - protid_0, offset_0, 4); - ICE_PROG_FLEX_ENTRY_EXTRACT(hw, prof_id, - protid_1, offset_1, 5); + /* Retrieve the fc threshold */ + val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC)); + fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M; - ice_init_flex_flags(hw, prof_id); - break; - default: - ice_debug(hw, ICE_DBG_INIT, - "Field init for profile ID %d not supported\n", - prof_id); - } + cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val); } /** @@ -532,10 +470,8 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id) enum ice_status ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) { - u16 fc_threshold_val, tx_timer_val; struct ice_aqc_set_mac_cfg *cmd; struct ice_aq_desc desc; - u32 reg_val; cmd = &desc.params.set_mac_cfg; @@ -546,27 +482,7 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) cmd->max_frame_size = CPU_TO_LE16(max_frame_size); - /* We read back the transmit timer and fc threshold value of - * LFC. Thus, we will use index = - * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX. - * - * Also, because we are opearating on transmit timer and fc - * threshold of LFC, we don't turn on any bit in tx_tmr_priority - */ -#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX - - /* Retrieve the transmit timer */ - reg_val = rd32(hw, - PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC)); - tx_timer_val = reg_val & - PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M; - cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val); - - /* Retrieve the fc threshold */ - reg_val = rd32(hw, - PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC)); - fc_threshold_val = reg_val & MAKEMASK(0xFFFF, 0); - cmd->fc_refresh_threshold = CPU_TO_LE16(fc_threshold_val); + ice_fill_tx_timer_and_fc_thresh(hw, cmd); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -575,12 +491,14 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) * ice_init_fltr_mgmt_struct - initializes filter management list and locks * @hw: pointer to the HW struct */ -static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) +enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) { struct ice_switch_info *sw; + enum ice_status status; hw->switch_info = (struct ice_switch_info *) ice_malloc(hw, sizeof(*hw->switch_info)); + sw = hw->switch_info; if (!sw) @@ -588,27 +506,36 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) INIT_LIST_HEAD(&sw->vsi_list_map_head); - return ice_init_def_sw_recp(hw); + status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list); + if (status) { + ice_free(hw, hw->switch_info); + return status; + } + return ICE_SUCCESS; } /** - * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks + * ice_cleanup_fltr_mgmt_single - clears single filter mngt struct * @hw: pointer to the HW struct + * @sw: pointer to switch info struct for which function clears filters */ -static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) +static 
void +ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw) { - struct ice_switch_info *sw = hw->switch_info; struct ice_vsi_list_map_info *v_pos_map; struct ice_vsi_list_map_info *v_tmp_map; struct ice_sw_recipe *recps; u8 i; + if (!sw) + return; + LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head, ice_vsi_list_map_info, list_entry) { LIST_DEL(&v_pos_map->list_entry); ice_free(hw, v_pos_map); } - recps = hw->switch_info->recp_list; + recps = sw->recp_list; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { struct ice_recp_grp_entry *rg_entry, *tmprg_entry; @@ -648,234 +575,25 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) if (recps[i].root_buf) ice_free(hw, recps[i].root_buf); } - ice_rm_all_sw_replay_rule_info(hw); + ice_rm_sw_replay_rule_info(hw, sw); ice_free(hw, sw->recp_list); ice_free(hw, sw); } -#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \ - (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry))) -#define ICE_FW_LOG_DESC_SIZE_MAX \ - ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX) - -/** - * ice_get_fw_log_cfg - get FW logging configuration - * @hw: pointer to the HW struct - */ -static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) -{ - struct ice_aqc_fw_logging_data *config; - struct ice_aq_desc desc; - enum ice_status status; - u16 size; - - size = ICE_FW_LOG_DESC_SIZE_MAX; - config = (struct ice_aqc_fw_logging_data *)ice_malloc(hw, size); - if (!config) - return ICE_ERR_NO_MEMORY; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); - - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF); - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); - - status = ice_aq_send_cmd(hw, &desc, config, size, NULL); - if (!status) { - u16 i; - - /* Save fw logging information into the HW structure */ - for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { - u16 v, m, flgs; - - v = LE16_TO_CPU(config->entry[i]); - m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; - flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S; - - if (m < ICE_AQC_FW_LOG_ID_MAX) - hw->fw_log.evnts[m].cur = flgs; - } - } - - ice_free(hw, config); - - return status; -} - -/** - * ice_cfg_fw_log - configure FW logging - * @hw: pointer to the HW struct - * @enable: enable certain FW logging events if true, disable all if false - * - * This function enables/disables the FW logging via Rx CQ events and a UART - * port based on predetermined configurations. FW logging via the Rx CQ can be - * enabled/disabled for individual PF's. However, FW logging via the UART can - * only be enabled/disabled for all PFs on the same device. - * - * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in - * hw->fw_log need to be set accordingly, e.g. based on user-provided input, - * before initializing the device. - * - * When re/configuring FW logging, callers need to update the "cfg" elements of - * the hw->fw_log.evnts array with the desired logging event configurations for - * modules of interest. When disabling FW logging completely, the callers can - * just pass false in the "enable" parameter. On completion, the function will - * update the "cur" element of the hw->fw_log.evnts array with the resulting - * logging event configurations of the modules that are being re/configured. FW - * logging modules that are not part of a reconfiguration operation retain their - * previous states. - * - * Before resetting the device, it is recommended that the driver disables FW - * logging before shutting down the control queue. 
When disabling FW logging - * ("enable" = false), the latest configurations of FW logging events stored in - * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after - * a device reset. - * - * When enabling FW logging to emit log messages via the Rx CQ during the - * device's initialization phase, a mechanism alternative to interrupt handlers - * needs to be used to extract FW log messages from the Rx CQ periodically and - * to prevent the Rx CQ from being full and stalling other types of control - * messages from FW to SW. Interrupts are typically disabled during the device's - * initialization phase. - */ -static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) -{ - struct ice_aqc_fw_logging_data *data = NULL; - struct ice_aqc_fw_logging *cmd; - enum ice_status status = ICE_SUCCESS; - u16 i, chgs = 0, len = 0; - struct ice_aq_desc desc; - u8 actv_evnts = 0; - void *buf = NULL; - - if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) - return ICE_SUCCESS; - - /* Disable FW logging only when the control queue is still responsive */ - if (!enable && - (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq))) - return ICE_SUCCESS; - - /* Get current FW log settings */ - status = ice_get_fw_log_cfg(hw); - if (status) - return status; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging); - cmd = &desc.params.fw_logging; - - /* Indicate which controls are valid */ - if (hw->fw_log.cq_en) - cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID; - - if (hw->fw_log.uart_en) - cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID; - - if (enable) { - /* Fill in an array of entries with FW logging modules and - * logging events being reconfigured. - */ - for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { - u16 val; - - /* Keep track of enabled event types */ - actv_evnts |= hw->fw_log.evnts[i].cfg; - - if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur) - continue; - - if (!data) { - data = (struct ice_aqc_fw_logging_data *) - ice_malloc(hw, - ICE_FW_LOG_DESC_SIZE_MAX); - if (!data) - return ICE_ERR_NO_MEMORY; - } - - val = i << ICE_AQC_FW_LOG_ID_S; - val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S; - data->entry[chgs++] = CPU_TO_LE16(val); - } - - /* Only enable FW logging if at least one module is specified. - * If FW logging is currently enabled but all modules are not - * enabled to emit log messages, disable FW logging altogether. - */ - if (actv_evnts) { - /* Leave if there is effectively no change */ - if (!chgs) - goto out; - - if (hw->fw_log.cq_en) - cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN; - - if (hw->fw_log.uart_en) - cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN; - - buf = data; - len = ICE_FW_LOG_DESC_SIZE(chgs); - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); - } - } - - status = ice_aq_send_cmd(hw, &desc, buf, len, NULL); - if (!status) { - /* Update the current configuration to reflect events enabled. - * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW - * logging mode is enabled for the device. They do not reflect - * actual modules being enabled to emit log messages. So, their - * values remain unchanged even when all modules are disabled. - */ - u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX; - - hw->fw_log.actv_evnts = actv_evnts; - for (i = 0; i < cnt; i++) { - u16 v, m; - - if (!enable) { - /* When disabling all FW logging events as part - * of device's de-initialization, the original - * configurations are retained, and can be used - * to reconfigure FW logging later if the device - * is re-initialized. 
- */ - hw->fw_log.evnts[i].cur = 0; - continue; - } - - v = LE16_TO_CPU(data->entry[i]); - m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; - hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg; - } - } - -out: - if (data) - ice_free(hw, data); - - return status; -} - /** - * ice_output_fw_log + * ice_cleanup_all_fltr_mgmt - cleanup filter management list and locks * @hw: pointer to the HW struct - * @desc: pointer to the AQ message descriptor - * @buf: pointer to the buffer accompanying the AQ message - * - * Formats a FW Log message and outputs it via the standard driver logs. */ -void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) +void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) { - ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n"); - ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf, - LE16_TO_CPU(desc->datalen)); - ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n"); + ice_cleanup_fltr_mgmt_single(hw, hw->switch_info); } /** - * ice_get_itr_intrl_gran - determine int/intrl granularity + * ice_get_itr_intrl_gran * @hw: pointer to the HW struct * - * Determines the itr/intrl granularities based on the maximum aggregate + * Determines the ITR/INTRL granularities based on the maximum aggregate * bandwidth according to the device's configuration during power-on. */ static void ice_get_itr_intrl_gran(struct ice_hw *hw) @@ -898,29 +616,6 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw) } } -/** - * ice_get_nvm_version - get cached NVM version data - * @hw: pointer to the hardware structure - * @oem_ver: 8 bit NVM version - * @oem_build: 16 bit NVM build number - * @oem_patch: 8 NVM patch number - * @ver_hi: high 16 bits of the NVM version - * @ver_lo: low 16 bits of the NVM version - */ -void -ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build, - u8 *oem_patch, u8 *ver_hi, u8 *ver_lo) -{ - struct ice_nvm_info *nvm = &hw->nvm; - - *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT); - *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK); - *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >> - ICE_OEM_VER_BUILD_SHIFT); - *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; - *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; -} - /** * ice_print_rollback_msg - print FW rollback message * @hw: pointer to the hardware structure @@ -928,16 +623,16 @@ ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build, void ice_print_rollback_msg(struct ice_hw *hw) { char nvm_str[ICE_NVM_VER_LEN] = { 0 }; - u8 oem_ver, oem_patch, ver_hi, ver_lo; - u16 oem_build; + struct ice_nvm_info *nvm = &hw->nvm; + struct ice_orom_info *orom; - ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi, - &ver_lo); - SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d", ver_hi, - ver_lo, hw->nvm.eetrack, oem_ver, oem_build, oem_patch); + orom = &nvm->orom; + SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d", + nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major, + orom->build, orom->patch); ice_warn(hw, - "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode", + "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n", nvm_str, hw->fw_maj_ver, hw->fw_min_ver); } @@ -954,7 +649,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - /* Set MAC type based on DeviceID */ status = ice_set_mac_type(hw); if (status) @@ -964,26 +658,23 @@ enum ice_status ice_init_hw(struct ice_hw *hw) PF_FUNC_RID_FUNCTION_NUMBER_M) >> PF_FUNC_RID_FUNCTION_NUMBER_S; - status = ice_reset(hw, ICE_RESET_PFR); if (status) return status; ice_get_itr_intrl_gran(hw); - status = ice_create_all_ctrlq(hw); if (status) goto err_unroll_cqinit; + status = ice_init_nvm(hw); + if (status) + goto err_unroll_cqinit; + if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK) ice_print_rollback_msg(hw); - /* Enable FW logging. Not fatal if this fails. */ - status = ice_cfg_fw_log(hw, true); - if (status) - ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n"); - status = ice_clear_pf_cfg(hw); if (status) goto err_unroll_cqinit; @@ -994,10 +685,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ice_clear_pxe_mode(hw); - status = ice_init_nvm(hw); - if (status) - goto err_unroll_cqinit; - status = ice_get_caps(hw); if (status) goto err_unroll_cqinit; @@ -1018,7 +705,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw) goto err_unroll_alloc; hw->evb_veb = true; - /* Query the allocated resources for Tx scheduler */ status = ice_sched_query_res_alloc(hw); if (status) { @@ -1026,7 +712,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) "Failed to get scheduler allocated resources\n"); goto err_unroll_alloc; } - + ice_sched_get_psm_clk_freq(hw); /* Initialize port_info struct with scheduler data */ status = ice_sched_init_port(hw->port_info); @@ -1066,7 +752,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_sched; - /* Get MAC information */ /* A single port can report up to two (LAN and WoL) addresses */ mac_buf = ice_calloc(hw, 2, @@ -1083,15 +768,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_fltr_mgmt_struct; - - ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC); - ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2); - ice_init_flex_flds(hw, ICE_RXDID_COMMS_GENERIC); - ice_init_flex_flds(hw, ICE_RXDID_COMMS_AUX_VLAN); - ice_init_flex_flds(hw, ICE_RXDID_COMMS_AUX_IPV4); - ice_init_flex_flds(hw, ICE_RXDID_COMMS_AUX_IPV6); - ice_init_flex_flds(hw, ICE_RXDID_COMMS_AUX_IPV6_FLOW); - ice_init_flex_flds(hw, ICE_RXDID_COMMS_AUX_TCP); + /* enable jumbo frame support at MAC level */ + status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); + if (status) + goto err_unroll_fltr_mgmt_struct; /* Obtain counter base index which would be used by flow director */ status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base); if (status) @@ -1099,6 +779,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) status = ice_init_hw_tbls(hw); if (status) goto err_unroll_fltr_mgmt_struct; + ice_init_lock(&hw->tnl_lock); return ICE_SUCCESS; err_unroll_fltr_mgmt_struct: @@ -1130,14 +811,13 @@ void ice_deinit_hw(struct ice_hw *hw) ice_sched_clear_agg(hw); ice_free_seg(hw); ice_free_hw_tbls(hw); + ice_destroy_lock(&hw->tnl_lock); if (hw->port_info) { ice_free(hw, hw->port_info); hw->port_info = NULL; } - /* Attempt to disable FW logging before shutting down control queues */ - ice_cfg_fw_log(hw, false); ice_destroy_all_ctrlq(hw); /* Clear VSI contexts if not already cleared */ @@ -1150,7 +830,7 @@ void ice_deinit_hw(struct ice_hw *hw) */ enum ice_status ice_check_reset(struct 
ice_hw *hw) { - u32 cnt, reg = 0, grst_delay; + u32 cnt, reg = 0, grst_delay, uld_mask; /* Poll for Device Active state in case a recent CORER, GLOBR, * or EMPR has occurred. The grst delay value is in 100ms units. @@ -1172,13 +852,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw) return ICE_ERR_RESET_FAILED; } -#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \ - GLNVM_ULD_GLOBR_DONE_M) +#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ + GLNVM_ULD_PCIER_DONE_1_M |\ + GLNVM_ULD_CORER_DONE_M |\ + GLNVM_ULD_GLOBR_DONE_M |\ + GLNVM_ULD_POR_DONE_M |\ + GLNVM_ULD_POR_DONE_1_M |\ + GLNVM_ULD_PCIER_DONE_2_M) + + uld_mask = ICE_RESET_DONE_MASK; /* Device is Active; check Global Reset processes are done */ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { - reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK; - if (reg == ICE_RESET_DONE_MASK) { + reg = rd32(hw, GLNVM_ULD) & uld_mask; + if (reg == uld_mask) { ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt); break; @@ -1278,79 +965,10 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) wr32(hw, GLGEN_RTRIG, val); ice_flush(hw); - /* wait for the FW to be ready */ return ice_check_reset(hw); } -/** - * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA - * @hw: pointer to hardware structure - * @module_tlv: pointer to module TLV to return - * @module_tlv_len: pointer to module TLV length to return - * @module_type: module type requested - * - * Finds the requested sub module TLV type from the Preserved Field - * Area (PFA) and returns the TLV pointer and length. The caller can - * use these to read the variable length TLV value. - */ -enum ice_status -ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, - u16 module_type) -{ - enum ice_status status; - u16 pfa_len, pfa_ptr; - u16 next_tlv; - - status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); - if (status != ICE_SUCCESS) { - ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n"); - return status; - } - status = ice_read_sr_word(hw, pfa_ptr, &pfa_len); - if (status != ICE_SUCCESS) { - ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); - return status; - } - /* Starting with first TLV after PFA length, iterate through the list - * of TLVs to find the requested one. - */ - next_tlv = pfa_ptr + 1; - while (next_tlv < pfa_ptr + pfa_len) { - u16 tlv_sub_module_type; - u16 tlv_len; - - /* Read TLV type */ - status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); - if (status != ICE_SUCCESS) { - ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); - break; - } - /* Read TLV length */ - status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); - if (status != ICE_SUCCESS) { - ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); - break; - } - if (tlv_sub_module_type == module_type) { - if (tlv_len) { - *module_tlv = next_tlv; - *module_tlv_len = tlv_len; - return ICE_SUCCESS; - } - return ICE_ERR_INVAL_SIZE; - } - /* Check next TLV, i.e. 
current TLV pointer + length + 2 words - * (for current TLV's type and length) - */ - next_tlv = next_tlv + tlv_len + 2; - } - /* Module does not exist */ - return ICE_ERR_DOES_NOT_EXIST; -} - - - /** * ice_copy_rxq_ctx_to_hw * @hw: pointer to the hardware structure @@ -1429,11 +1047,10 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, rlan_ctx->prefena = 1; - ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); + ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); } -#if !defined(NO_UNUSED_CTX_CODE) || defined(AE_DRIVER) /** * ice_clear_rxq_ctx * @hw: pointer to the hardware structure @@ -1454,7 +1071,6 @@ enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index) return ICE_SUCCESS; } -#endif /* !NO_UNUSED_CTX_CODE || AE_DRIVER */ /* LAN Tx Queue Context */ const struct ice_ctx_ele ice_tlan_ctx_info[] = { @@ -1490,7 +1106,6 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { { 0 } }; -#if !defined(NO_UNUSED_CTX_CODE) || defined(AE_DRIVER) /** * ice_copy_tx_cmpltnq_ctx_to_hw * @hw: pointer to the hardware structure @@ -1555,7 +1170,7 @@ ice_write_tx_cmpltnq_ctx(struct ice_hw *hw, { u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 }; - ice_set_ctx((u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info); + ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info); return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index); } @@ -1646,7 +1261,8 @@ ice_write_tx_drbell_q_ctx(struct ice_hw *hw, { u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 }; - ice_set_ctx((u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info); + ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf, + ice_tx_drbell_q_ctx_info); return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index); } @@ -1671,8 +1287,6 @@ ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index) return ICE_SUCCESS; } -#endif /* !NO_UNUSED_CTX_CODE || AE_DRIVER */ - /* FW Admin Queue command wrappers */ @@ -1690,6 +1304,28 @@ enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { + if (hw->aq_send_cmd_fn) { + enum ice_status status = ICE_ERR_NOT_READY; + u16 retval = ICE_AQ_RC_OK; + + ice_acquire_lock(&hw->adminq.sq_lock); + if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc, + buf, buf_size)) { + retval = LE16_TO_CPU(desc->retval); + /* strip off FW internal code */ + if (retval) + retval &= 0xff; + if (retval == ICE_AQ_RC_OK) + status = ICE_SUCCESS; + else + status = ICE_ERR_AQ_ERROR; + } + + hw->adminq.sq_last_status = (enum ice_aq_err)retval; + ice_release_lock(&hw->adminq.sq_lock); + + return status; + } return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); } @@ -2047,7 +1683,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) enum ice_status status; u16 buf_len; - buf_len = sizeof(*buf) + sizeof(buf->elem) * (num - 1); + buf_len = ice_struct_size(buf, elem, num - 1); buf = (struct ice_aqc_alloc_free_res_elem *) ice_malloc(hw, buf_len); if (!buf) @@ -2087,7 +1723,7 @@ ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) enum ice_status status; u16 buf_len; - buf_len = sizeof(*buf) + sizeof(buf->elem) * (num - 1); + buf_len = ice_struct_size(buf, elem, num - 1); buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!buf) return ICE_ERR_NO_MEMORY; @@ -2178,7 +1814,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, case 
ICE_AQC_CAPS_VALID_FUNCTIONS: caps->valid_functions = number; ice_debug(hw, ICE_DBG_INIT, - "%s: valid functions = %d\n", prefix, + "%s: valid_functions (bitmap) = %d\n", prefix, caps->valid_functions); /* store func count for resource management purposes */ @@ -2189,17 +1825,17 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, if (dev_p) { dev_p->num_vsi_allocd_to_host = number; ice_debug(hw, ICE_DBG_INIT, - "%s: num VSI alloc to host = %d\n", + "%s: num_vsi_allocd_to_host = %d\n", prefix, dev_p->num_vsi_allocd_to_host); } else if (func_p) { func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); ice_debug(hw, ICE_DBG_INIT, - "%s: num guaranteed VSI (fw) = %d\n", + "%s: guar_num_vsi (fw) = %d\n", prefix, number); ice_debug(hw, ICE_DBG_INIT, - "%s: num guaranteed VSI = %d\n", + "%s: guar_num_vsi = %d\n", prefix, func_p->guar_num_vsi); } break; @@ -2208,64 +1844,65 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, caps->active_tc_bitmap = logical_id; caps->maxtc = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: DCB = %d\n", prefix, caps->dcb); + "%s: dcb = %d\n", prefix, caps->dcb); ice_debug(hw, ICE_DBG_INIT, - "%s: active TC bitmap = %d\n", prefix, + "%s: active_tc_bitmap = %d\n", prefix, caps->active_tc_bitmap); ice_debug(hw, ICE_DBG_INIT, - "%s: TC max = %d\n", prefix, caps->maxtc); + "%s: maxtc = %d\n", prefix, caps->maxtc); break; case ICE_AQC_CAPS_RSS: caps->rss_table_size = number; caps->rss_table_entry_width = logical_id; ice_debug(hw, ICE_DBG_INIT, - "%s: RSS table size = %d\n", prefix, + "%s: rss_table_size = %d\n", prefix, caps->rss_table_size); ice_debug(hw, ICE_DBG_INIT, - "%s: RSS table width = %d\n", prefix, + "%s: rss_table_entry_width = %d\n", prefix, caps->rss_table_entry_width); break; case ICE_AQC_CAPS_RXQS: caps->num_rxq = number; caps->rxq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: num Rx queues = %d\n", prefix, + "%s: num_rxq = %d\n", prefix, caps->num_rxq); ice_debug(hw, ICE_DBG_INIT, - "%s: Rx first queue ID = %d\n", prefix, + "%s: rxq_first_id = %d\n", prefix, caps->rxq_first_id); break; case ICE_AQC_CAPS_TXQS: caps->num_txq = number; caps->txq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: num Tx queues = %d\n", prefix, + "%s: num_txq = %d\n", prefix, caps->num_txq); ice_debug(hw, ICE_DBG_INIT, - "%s: Tx first queue ID = %d\n", prefix, + "%s: txq_first_id = %d\n", prefix, caps->txq_first_id); break; case ICE_AQC_CAPS_MSIX: caps->num_msix_vectors = number; caps->msix_vector_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, - "%s: MSIX vector count = %d\n", prefix, + "%s: num_msix_vectors = %d\n", prefix, caps->num_msix_vectors); ice_debug(hw, ICE_DBG_INIT, - "%s: MSIX first vector index = %d\n", prefix, + "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; case ICE_AQC_CAPS_FD: - { - u32 reg_val, val; - if (dev_p) { dev_p->num_flow_director_fltr = number; ice_debug(hw, ICE_DBG_INIT, - "%s: num FD filters = %d\n", prefix, + "%s: num_flow_director_fltr = %d\n", + prefix, dev_p->num_flow_director_fltr); } if (func_p) { + u32 reg_val, val; + if (hw->dcf_enabled) + break; reg_val = rd32(hw, GLQF_FD_SIZE); val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> GLQF_FD_SIZE_FD_GSIZE_S; @@ -2275,17 +1912,16 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, GLQF_FD_SIZE_FD_BSIZE_S; func_p->fd_fltr_best_effort = val; ice_debug(hw, ICE_DBG_INIT, - "%s: num guaranteed FD filters = %d\n", + "%s: fd_fltr_guar = %d\n", prefix, func_p->fd_fltr_guar); ice_debug(hw, ICE_DBG_INIT, - "%s: num best effort FD 
filters = %d\n", + "%s: fd_fltr_best_effort = %d\n", prefix, func_p->fd_fltr_best_effort); } break; - } case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; - ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n", + ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", prefix, caps->max_mtu); break; default: @@ -2304,7 +1940,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, /* Max 4 TCs per port */ caps->maxtc = 4; ice_debug(hw, ICE_DBG_INIT, - "%s: TC max = %d (based on #ports)\n", prefix, + "%s: maxtc = %d (based on #ports)\n", prefix, caps->maxtc); } } @@ -2393,6 +2029,70 @@ ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) return status; } +/** + * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode + * @hw: pointer to the hardware structure + */ +void ice_set_safe_mode_caps(struct ice_hw *hw) +{ + struct ice_hw_func_caps *func_caps = &hw->func_caps; + struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; + u32 valid_func, rxq_first_id, txq_first_id; + u32 msix_vector_first_id, max_mtu; + u32 num_funcs; + + /* cache some func_caps values that should be restored after memset */ + valid_func = func_caps->common_cap.valid_functions; + txq_first_id = func_caps->common_cap.txq_first_id; + rxq_first_id = func_caps->common_cap.rxq_first_id; + msix_vector_first_id = func_caps->common_cap.msix_vector_first_id; + max_mtu = func_caps->common_cap.max_mtu; + + /* unset func capabilities */ + memset(func_caps, 0, sizeof(*func_caps)); + + /* restore cached values */ + func_caps->common_cap.valid_functions = valid_func; + func_caps->common_cap.txq_first_id = txq_first_id; + func_caps->common_cap.rxq_first_id = rxq_first_id; + func_caps->common_cap.msix_vector_first_id = msix_vector_first_id; + func_caps->common_cap.max_mtu = max_mtu; + + /* one Tx and one Rx queue in safe mode */ + func_caps->common_cap.num_rxq = 1; + func_caps->common_cap.num_txq = 1; + + /* two MSIX vectors, one for traffic and one for misc causes */ + func_caps->common_cap.num_msix_vectors = 2; + func_caps->guar_num_vsi = 1; + + /* cache some dev_caps values that should be restored after memset */ + valid_func = dev_caps->common_cap.valid_functions; + txq_first_id = dev_caps->common_cap.txq_first_id; + rxq_first_id = dev_caps->common_cap.rxq_first_id; + msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id; + max_mtu = dev_caps->common_cap.max_mtu; + num_funcs = dev_caps->num_funcs; + + /* unset dev capabilities */ + memset(dev_caps, 0, sizeof(*dev_caps)); + + /* restore cached values */ + dev_caps->common_cap.valid_functions = valid_func; + dev_caps->common_cap.txq_first_id = txq_first_id; + dev_caps->common_cap.rxq_first_id = rxq_first_id; + dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id; + dev_caps->common_cap.max_mtu = max_mtu; + dev_caps->num_funcs = num_funcs; + + /* one Tx and one Rx queue per function in safe mode */ + dev_caps->common_cap.num_rxq = num_funcs; + dev_caps->common_cap.num_txq = num_funcs; + + /* two MSIX vectors per function */ + dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; +} + /** * ice_get_caps - get info about the HW * @hw: pointer to the hardware structure @@ -2428,11 +2128,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); cmd->flags = flags; - - - /* Prep values for flags, sah, sal */ - cmd->sah = HTONS(*((const u16 *)mac_addr)); - cmd->sal = HTONL(*((const u32 *)(mac_addr + 2))); + ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, 
ICE_NONDMA_TO_DMA); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -2466,7 +2162,6 @@ void ice_clear_pxe_mode(struct ice_hw *hw) ice_aq_clear_pxe_mode(hw); } - /** * ice_get_link_speed_based_on_phy_type - returns link speed * @phy_type_low: lower part of phy_type @@ -2683,14 +2378,17 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", (unsigned long long)LE64_TO_CPU(cfg->phy_type_high)); ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps); - ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n", - cfg->low_power_ctrl); + ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl_an = 0x%x\n", + cfg->low_power_ctrl_an); ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap); ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value); ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt); status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); + if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) + status = ICE_SUCCESS; + if (!status) pi->phy.curr_user_phy_cfg = *cfg; @@ -2771,33 +2469,86 @@ ice_cache_phy_user_req(struct ice_port_info *pi, } /** - * ice_set_fc - * @pi: port information structure - * @aq_failures: pointer to status code, specific to ice_set_fc routine - * @ena_auto_link_update: enable automatic link update + * ice_caps_to_fc_mode + * @caps: PHY capabilities * - * Set the requested flow control mode. + * Convert PHY FC capabilities to ice FC mode */ -enum ice_status -ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) +enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) { - struct ice_aqc_set_phy_cfg_data cfg = { 0 }; + if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && + caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) + return ICE_FC_FULL; + + if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) + return ICE_FC_TX_PAUSE; + + if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) + return ICE_FC_RX_PAUSE; + + return ICE_FC_NONE; +} + +/** + * ice_caps_to_fec_mode + * @caps: PHY capabilities + * @fec_options: Link FEC options + * + * Convert PHY FEC capabilities to ice FEC mode + */ +enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) +{ + if (caps & ICE_AQC_PHY_EN_AUTO_FEC) + return ICE_FEC_AUTO; + + if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | + ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | + ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | + ICE_AQC_PHY_FEC_25G_KR_REQ)) + return ICE_FEC_BASER; + + if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | + ICE_AQC_PHY_FEC_25G_RS_544_REQ | + ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) + return ICE_FEC_RS; + + return ICE_FEC_NONE; +} + +static enum ice_status +ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + enum ice_fc_mode req_mode) +{ + struct ice_aqc_get_phy_caps_data *pcaps = NULL; struct ice_phy_cache_mode_data cache_data; - struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; + enum ice_status status = ICE_SUCCESS; u8 pause_mask = 0x0; - struct ice_hw *hw; - if (!pi) - return ICE_ERR_PARAM; - hw = pi->hw; - *aq_failures = ICE_SET_FC_AQ_FAIL_NONE; + if (!pi || !cfg) + return ICE_ERR_BAD_PTR; + + pcaps = (struct ice_aqc_get_phy_caps_data *) + ice_malloc(pi->hw, sizeof(*pcaps)); + if (!pcaps) + return ICE_ERR_NO_MEMORY; /* Cache user FC request */ - cache_data.data.curr_user_fc_req = pi->fc.req_mode; + cache_data.data.curr_user_fc_req = req_mode; ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); - switch (pi->fc.req_mode) { + switch (req_mode) { + case ICE_FC_AUTO: + /* Query the value of FC that both the 
NIC and attached media + * can do. + */ + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + pcaps, NULL); + if (status) + goto out; + + pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE; + pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE; + break; case ICE_FC_FULL: pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; @@ -2812,6 +2563,39 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) break; } + /* clear the old pause settings */ + cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | + ICE_AQC_PHY_EN_RX_LINK_PAUSE); + + /* set the new capabilities */ + cfg->caps |= pause_mask; + +out: + ice_free(pi->hw, pcaps); + return status; +} + +/** + * ice_set_fc + * @pi: port information structure + * @aq_failures: pointer to status code, specific to ice_set_fc routine + * @ena_auto_link_update: enable automatic link update + * + * Set the requested flow control mode. + */ +enum ice_status +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) +{ + struct ice_aqc_set_phy_cfg_data cfg = { 0 }; + struct ice_aqc_get_phy_caps_data *pcaps; + enum ice_status status; + struct ice_hw *hw; + + if (!pi || !aq_failures) + return ICE_ERR_BAD_PTR; + + hw = pi->hw; + pcaps = (struct ice_aqc_get_phy_caps_data *) ice_malloc(hw, sizeof(*pcaps)); if (!pcaps) @@ -2825,12 +2609,16 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) goto out; } - /* clear the old pause settings */ - cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | - ICE_AQC_PHY_EN_RX_LINK_PAUSE); + ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); - /* set the new capabilities */ - cfg.caps |= pause_mask; + /* Configure the set phy data */ + status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); + if (status) { + if (status != ICE_ERR_BAD_PTR) + *aq_failures = ICE_SET_FC_AQ_FAIL_GET; + + goto out; + } /* If the capabilities have changed, then set the new config */ if (cfg.caps != pcaps->caps) { @@ -2839,13 +2627,6 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) /* Auto restart link so settings take effect */ if (ena_auto_link_update) cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - /* Copy over all the old settings */ - cfg.phy_type_high = pcaps->phy_type_high; - cfg.phy_type_low = pcaps->phy_type_low; - cfg.low_power_ctrl = pcaps->low_power_ctrl; - cfg.eee_cap = pcaps->eee_cap; - cfg.eeer_value = pcaps->eeer_value; - cfg.link_fec_opt = pcaps->link_fec_options; status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); if (status) { @@ -2876,8 +2657,45 @@ out: return status; } +/** + * ice_phy_caps_equals_cfg + * @phy_caps: PHY capabilities + * @phy_cfg: PHY configuration + * + * Helper function to determine if PHY capabilities matches PHY + * configuration + */ +bool +ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, + struct ice_aqc_set_phy_cfg_data *phy_cfg) +{ + u8 caps_mask, cfg_mask; + + if (!phy_caps || !phy_cfg) + return false; + + /* These bits are not common between capabilities and configuration. + * Do not use them to determine equality. 
+ */ + caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | + ICE_AQC_PHY_EN_MOD_QUAL); + cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + + if (phy_caps->phy_type_low != phy_cfg->phy_type_low || + phy_caps->phy_type_high != phy_cfg->phy_type_high || + ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || + phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || + phy_caps->eee_cap != phy_cfg->eee_cap || + phy_caps->eeer_value != phy_cfg->eeer_value || + phy_caps->link_fec_options != phy_cfg->link_fec_opt) + return false; + + return true; +} + /** * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data + * @pi: port information structure * @caps: PHY ability structure to copy date from * @cfg: PHY configuration structure to copy data to * @@ -2885,33 +2703,65 @@ out: * data structure */ void -ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, +ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, + struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg) { - if (!caps || !cfg) + if (!pi || !caps || !cfg) return; + ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM); cfg->phy_type_low = caps->phy_type_low; cfg->phy_type_high = caps->phy_type_high; cfg->caps = caps->caps; - cfg->low_power_ctrl = caps->low_power_ctrl; + cfg->low_power_ctrl_an = caps->low_power_ctrl_an; cfg->eee_cap = caps->eee_cap; cfg->eeer_value = caps->eeer_value; cfg->link_fec_opt = caps->link_fec_options; + cfg->module_compliance_enforcement = + caps->module_compliance_enforcement; + + if (ice_fw_supports_link_override(pi->hw)) { + struct ice_link_default_override_tlv tlv; + + if (ice_get_link_default_override(&tlv, pi)) + return; + + if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) + cfg->module_compliance_enforcement |= + ICE_LINK_OVERRIDE_STRICT_MODE; + } } /** * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode + * @pi: port information structure * @cfg: PHY configuration data to set FEC mode * @fec: FEC mode to configure - * - * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC - * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps - * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling. */ -void -ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) +enum ice_status +ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + enum ice_fec_mode fec) { + struct ice_aqc_get_phy_caps_data *pcaps; + enum ice_status status = ICE_SUCCESS; + struct ice_hw *hw; + + if (!pi || !cfg) + return ICE_ERR_BAD_PTR; + + hw = pi->hw; + + pcaps = (struct ice_aqc_get_phy_caps_data *) + ice_malloc(hw, sizeof(*pcaps)); + if (!pcaps) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + NULL); + if (status) + goto out; + switch (fec) { case ICE_FEC_BASER: /* Clear RS bits, and AND BASE-R ability @@ -2937,8 +2787,28 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) case ICE_FEC_AUTO: /* AND auto FEC bit, and all caps bits. 
*/ cfg->caps &= ICE_AQC_PHY_CAPS_MASK; + cfg->link_fec_opt |= pcaps->link_fec_options; + break; + default: + status = ICE_ERR_PARAM; break; } + + if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) { + struct ice_link_default_override_tlv tlv; + + if (ice_get_link_default_override(&tlv, pi)) + goto out; + + if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && + (tlv.options & ICE_LINK_OVERRIDE_EN)) + cfg->link_fec_opt = tlv.fec_options; + } + +out: + ice_free(hw, pcaps); + + return status; } /** @@ -3052,7 +2922,6 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } - /** * ice_aq_set_port_id_led * @pi: pointer to the port information @@ -3073,7 +2942,6 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); - if (is_orig_mode) cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; else @@ -3082,6 +2950,52 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } +/** + * ice_aq_sff_eeprom + * @hw: pointer to the HW struct + * @lport: bits [7:0] = logical port, bit [8] = logical port valid + * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) + * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. + * @page: QSFP page + * @set_page: set or ignore the page + * @data: pointer to data buffer to be read/written to the I2C device. + * @length: 1-16 for read, 1 for write. + * @write: 0 read, 1 for write. + * @cd: pointer to command details structure or NULL + * + * Read/Write SFF EEPROM (0x06EE) + */ +enum ice_status +ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, + u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, + bool write, struct ice_sq_cd *cd) +{ + struct ice_aqc_sff_eeprom *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (!data || (mem_addr & 0xff00)) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); + cmd = &desc.params.read_write_sff_param; + desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF); + cmd->lport_num = (u8)(lport & 0xff); + cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); + cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) & + ICE_AQC_SFF_I2CBUS_7BIT_M) | + ((set_page << + ICE_AQC_SFF_SET_EEPROM_PAGE_S) & + ICE_AQC_SFF_SET_EEPROM_PAGE_M)); + cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff); + cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); + if (write) + cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE); + + status = ice_aq_send_cmd(hw, &desc, data, length, cd); + return status; +} + /** * __ice_aq_get_set_rss_lut * @hw: pointer to the hardware structure @@ -3444,6 +3358,76 @@ do_aq: return status; } +/** + * ice_aq_move_recfg_lan_txq + * @hw: pointer to the hardware structure + * @num_qs: number of queues to move/reconfigure + * @is_move: true if this operation involves node movement + * @is_tc_change: true if this operation involves a TC change + * @subseq_call: true if this operation is a subsequent call + * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN + * @timeout: timeout in units of 100 usec (valid values 0-50) + * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN + * @buf: struct containing src/dest TEID and per-queue info + * @buf_size: size of buffer for indirect command + * @txqs_moved: out param, number of queues successfully 
moved + * @cd: pointer to command details structure or NULL + * + * Move / Reconfigure Tx LAN queues (0x0C32) + */ +enum ice_status +ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, + bool is_tc_change, bool subseq_call, bool flush_pipe, + u8 timeout, u32 *blocked_cgds, + struct ice_aqc_move_txqs_data *buf, u16 buf_size, + u8 *txqs_moved, struct ice_sq_cd *cd) +{ + struct ice_aqc_move_txqs *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.move_txqs; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs); + +#define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50 + if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX) + return ICE_ERR_PARAM; + + if (is_tc_change && !flush_pipe && !blocked_cgds) + return ICE_ERR_PARAM; + + if (!is_move && !is_tc_change) + return ICE_ERR_PARAM; + + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + if (is_move) + cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE; + + if (is_tc_change) + cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE; + + if (subseq_call) + cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL; + + if (flush_pipe) + cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE; + + cmd->num_qs = num_qs; + cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) & + ICE_AQC_Q_CMD_TIMEOUT_M); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + + if (!status && txqs_moved) + *txqs_moved = cmd->num_qs; + + if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN && + is_tc_change && !flush_pipe) + *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds); + + return status; +} /* End of FW Admin Queue command wrappers */ @@ -3633,12 +3617,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) /** * ice_set_ctx - set context bits in packed structure + * @hw: pointer to the hardware structure * @src_ctx: pointer to a generic non-packed context structure * @dest_ctx: pointer to memory for the packed structure * @ce_info: a description of the structure to be transformed */ enum ice_status -ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) +ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info) { int f; @@ -3647,6 +3633,12 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * using the correct size so that we are correct regardless * of the endianness of the machine. */ + if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { + ice_debug(hw, ICE_DBG_QCTX, + "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", + f, ce_info[f].width, ce_info[f].size_of); + continue; + } switch (ce_info[f].size_of) { case sizeof(u8): ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); @@ -3668,9 +3660,6 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) return ICE_SUCCESS; } - - - /** * ice_read_byte - read context byte into struct * @src_ctx: the context structure to read from @@ -4088,7 +4077,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, * This function adds/updates the VSI queues per TC. */ static enum ice_status -ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, +ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *maxqs, u8 owner) { enum ice_status status = ICE_SUCCESS; @@ -4127,28 +4116,40 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, * This function adds/updates the VSI LAN queues per TC. 
*/ enum ice_status -ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, +ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_lanqs) { return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, ICE_SCHED_NODE_OWNER_LAN); } - +/** + * ice_is_main_vsi - checks whether the VSI is main VSI + * @hw: pointer to the HW struct + * @vsi_handle: VSI handle + * + * Checks whether the VSI is the main VSI (the first PF VSI created on + * given PF). + */ +static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle) +{ + return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle]; +} /** * ice_replay_pre_init - replay pre initialization * @hw: pointer to the HW struct + * @sw: pointer to switch info struct for which function initializes filters * * Initializes required config data for VSI, FD, ACL, and RSS before replay. */ -static enum ice_status ice_replay_pre_init(struct ice_hw *hw) +static enum ice_status +ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw) { - struct ice_switch_info *sw = hw->switch_info; u8 i; /* Delete old entries from replay filter list head if there is any */ - ice_rm_all_sw_replay_rule_info(hw); + ice_rm_sw_replay_rule_info(hw, sw); /* In start of replay, move entries into replay_rules list, it * will allow adding rules entries back to filt_rules list, * which is operational list. @@ -4158,7 +4159,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw) &sw->recp_list[i].filt_replay_rules); ice_sched_replay_agg_vsi_preinit(hw); - return ice_sched_replay_tc_node_bw(hw); + return ice_sched_replay_tc_node_bw(hw->port_info); } /** @@ -4171,14 +4172,16 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw) */ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) { + struct ice_switch_info *sw = hw->switch_info; + struct ice_port_info *pi = hw->port_info; enum ice_status status; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; /* Replay pre-initialization if there is any */ - if (vsi_handle == ICE_MAIN_VSI_HANDLE) { - status = ice_replay_pre_init(hw); + if (ice_is_main_vsi(hw, vsi_handle)) { + status = ice_replay_pre_init(hw, sw); if (status) return status; } @@ -4187,7 +4190,7 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) if (status) return status; /* Replay per VSI all filters */ - status = ice_replay_vsi_all_fltr(hw, vsi_handle); + status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle); if (!status) status = ice_replay_vsi_agg(hw, vsi_handle); return status; @@ -4282,6 +4285,56 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, *prev_stat = new_data; } +/** + * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values + * @hw: ptr to the hardware info + * @vsi_handle: VSI handle + * @prev_stat_loaded: bool to specify if the previous stat values are loaded + * @cur_stats: ptr to current stats structure + * + * The GLV_REPC statistic register actually tracks two 16bit statistics, and + * thus cannot be read using the normal ice_stat_update32 function. + * + * Read the GLV_REPC register associated with the given VSI, and update the + * rx_no_desc and rx_error values in the ice_eth_stats structure. + * + * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be + * cleared each time it's read. + * + * Note that the GLV_RDPC register also counts the causes that would trigger + * GLV_REPC. However, it does not give the finer grained detail about why the + * packets are being dropped. 
The GLV_REPC values can be used to distinguish + * whether Rx packets are dropped due to errors or due to no available + * descriptors. + */ +void +ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded, + struct ice_eth_stats *cur_stats) +{ + u16 vsi_num, no_desc, error_cnt; + u32 repc; + + if (!ice_is_vsi_valid(hw, vsi_handle)) + return; + + vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); + + /* If we haven't loaded stats yet, just clear the current value */ + if (!prev_stat_loaded) { + wr32(hw, GLV_REPC(vsi_num), 0); + return; + } + + repc = rd32(hw, GLV_REPC(vsi_num)); + no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S; + error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S; + + /* Clear the count by writing to the stats register */ + wr32(hw, GLV_REPC(vsi_num), 0); + + cur_stats->rx_no_desc += no_desc; + cur_stats->rx_errors += error_cnt; +} /** * ice_sched_query_elem - query element information from HW @@ -4331,3 +4384,106 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw) else return ICE_FW_MODE_NORMAL; } + +/** + * ice_fw_supports_link_override + * @hw: pointer to the hardware structure + * + * Checks if the firmware supports link override + */ +bool ice_fw_supports_link_override(struct ice_hw *hw) +{ + /* Currently, only supported for E810 devices */ + if (hw->mac_type != ICE_MAC_E810) + return false; + + if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { + if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) + return true; + if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN && + hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH) + return true; + } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) { + return true; + } + + return false; +} + +/** + * ice_get_link_default_override + * @ldo: pointer to the link default override struct + * @pi: pointer to the port info struct + * + * Gets the link default override for a port + */ +enum ice_status +ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, + struct ice_port_info *pi) +{ + u16 i, tlv, tlv_len, tlv_start, buf, offset; + struct ice_hw *hw = pi->hw; + enum ice_status status; + + status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, + ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read link override TLV.\n"); + return status; + } + + /* Each port has its own config; calculate for our port */ + tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + + ICE_SR_PFA_LINK_OVERRIDE_OFFSET; + + /* link options first */ + status = ice_read_sr_word(hw, tlv_start, &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override link options.\n"); + return status; + } + ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; + ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> + ICE_LINK_OVERRIDE_PHY_CFG_S; + + /* link PHY config */ + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; + status = ice_read_sr_word(hw, offset, &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override phy config.\n"); + return status; + } + ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; + + /* PHY types low */ + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET; + for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { + status = ice_read_sr_word(hw, (offset + i), &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override link options.\n"); + return status; + } + /* shift 16 bits at a time to fill 64 bits */ + ldo->phy_type_low |= ((u64)buf << (i * 
16)); + } + + /* PHY types high */ + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET + + ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; + for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { + status = ice_read_sr_word(hw, (offset + i), &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override link options.\n"); + return status; + } + /* shift 16 bits at a time to fill 64 bits */ + ldo->phy_type_high |= ((u64)buf << (i * 16)); + } + + return status; +}
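
Usage sketch (illustrative only, not part of the commit above): with the reworked ice_cfg_phy_fec() taking the port_info and fetching topology capabilities on its own, a caller only needs to seed a set-PHY-config structure and pick a FEC mode. The final apply step (the set-PHY-config admin queue command) is outside this hunk and only referenced in a comment; example_request_fec() is a hypothetical wrapper, not driver code.

static enum ice_status
example_request_fec(struct ice_port_info *pi, enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_aqc_set_phy_cfg_data cfg;
	enum ice_status status;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(pi->hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Current topology abilities seed the requested configuration */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
				     pcaps, NULL);
	if (status)
		goto out;

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* The helper re-reads topology caps internally and, for
	 * ICE_FEC_AUTO, also honors the NVM link default override.
	 */
	status = ice_cfg_phy_fec(pi, &cfg, fec);
	if (status)
		goto out;

	/* cfg would now be handed to the set-PHY-config admin queue
	 * wrapper to take effect (not shown in this diff).
	 */
out:
	ice_free(pi->hw, pcaps);
	return status;
}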
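
A minimal sketch of the new SFF EEPROM wrapper, reading the start of the module's lower page with the defaults described in its comment block (I2C address 0xA0, page 0, no page select). example_read_module_lower_page() is a hypothetical caller, not driver code.

static enum ice_status
example_read_module_lower_page(struct ice_hw *hw, u16 lport, u8 *buf)
{
	/* Per the comment block above: bits [7:0] carry the logical port
	 * and bit [8] marks it valid; 0xA0 is the usual module address.
	 * Reads are limited to 16 bytes per command, so buf must hold at
	 * least 16 bytes.
	 */
	return ice_aq_sff_eeprom(hw, lport, 0xA0, 0x000, 0, 0, buf, 16,
				 false, NULL);
}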
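
A sketch of how the GLV_REPC helper slots into a per-VSI stats refresh, following the prev_stat_loaded convention used by ice_stat_update32(); the wrapper name and the caller-owned "loaded" flag are assumptions.

static void
example_refresh_repc_stats(struct ice_hw *hw, u16 vsi_handle, bool *loaded,
			   struct ice_eth_stats *stats)
{
	/* The first call only clears GLV_REPC; later calls accumulate the
	 * no-descriptor and error counts into stats->rx_no_desc and
	 * stats->rx_errors.
	 */
	ice_stat_update_repc(hw, vsi_handle, *loaded, stats);
	*loaded = true;
}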
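
A sketch combining the two new link-override helpers to answer whether a port must keep strict module-compliance enforcement; example_port_is_strict() is hypothetical and treats a failed TLV read as "no override configured".

static bool example_port_is_strict(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv tlv;

	if (!ice_fw_supports_link_override(pi->hw))
		return false;

	/* A failed NVM read means no usable override for this port */
	if (ice_get_link_default_override(&tlv, pi))
		return false;

	return (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) != 0;
}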