X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fbase%2Fice_common.c;h=199430e2811889685d9eb8ca2d299191feca8e3f;hb=fdafeee39a8424894cddabb739ad3f2091a58bbd;hp=c1af243221acc913530468fee8a38510be55fc1b;hpb=f21a02d833391030e7d2e19ac7e061c19bfd5310;p=dpdk.git

diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index c1af243221..199430e281 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -449,11 +449,7 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
 {
 	u16 fc_threshold_val, tx_timer_val;
 	struct ice_aqc_set_mac_cfg *cmd;
-	struct ice_port_info *pi;
 	struct ice_aq_desc desc;
-	enum ice_status status;
-	u8 port_num = 0;
-	bool link_up;
 	u32 reg_val;
 
 	cmd = &desc.params.set_mac_cfg;
@@ -465,21 +461,6 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
 
 	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
 
-	/* Retrieve the current data_pacing value in FW*/
-	pi = &hw->port_info[port_num];
-
-	/* We turn on the get_link_info so that ice_update_link_info(...)
-	 * can be called.
-	 */
-	pi->phy.get_link_info = 1;
-
-	status = ice_get_link_status(pi, &link_up);
-
-	if (status)
-		return status;
-
-	cmd->params = pi->phy.link_info.pacing;
-
 	/* We read back the transmit timer and fc threshold value of
 	 * LFC. Thus, we will use index =
 	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
@@ -544,7 +525,15 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
 	}
 	recps = hw->switch_info->recp_list;
 	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
+
 		recps[i].root_rid = i;
+		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
+					 &recps[i].rg_list, ice_recp_grp_entry,
+					 l_entry) {
+			LIST_DEL(&rg_entry->l_entry);
+			ice_free(hw, rg_entry);
+		}
 		if (recps[i].adv_rule) {
 			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
@@ -571,6 +560,8 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
 				ice_free(hw, lst_itr);
 			}
 		}
+		if (recps[i].root_buf)
+			ice_free(hw, recps[i].root_buf);
 	}
 	ice_rm_all_sw_replay_rule_info(hw);
 	ice_free(hw, sw->recp_list);
@@ -789,10 +780,10 @@ out:
  */
 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
 {
-	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
-	ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
+	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
+	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
 			LE16_TO_CPU(desc->datalen));
-	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
+	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
 }
 
 /**
@@ -833,7 +824,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	u16 mac_buf_len;
 	void *mac_buf;
 
-	ice_debug(hw, ICE_DBG_TRACE, "ice_init_hw");
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
 	/* Set MAC type based on DeviceID */
@@ -853,7 +844,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 
 	ice_get_itr_intrl_gran(hw);
 
-	status = ice_init_all_ctrlq(hw);
+	status = ice_create_all_ctrlq(hw);
 	if (status)
 		goto err_unroll_cqinit;
@@ -981,7 +972,7 @@ err_unroll_alloc:
 	ice_free(hw, hw->port_info);
 	hw->port_info = NULL;
 err_unroll_cqinit:
-	ice_shutdown_all_ctrlq(hw);
+	ice_destroy_all_ctrlq(hw);
 	return status;
 }
 
@@ -1010,7 +1001,7 @@ void ice_deinit_hw(struct ice_hw *hw)
 	/* Attempt to disable FW logging before shutting down control queues */
 	ice_cfg_fw_log(hw, false);
-	ice_shutdown_all_ctrlq(hw);
+	ice_destroy_all_ctrlq(hw);
 
 	/* Clear VSI contexts if not already cleared */
 	ice_clear_all_vsi_ctx(hw);
@@ -1213,6 +1204,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
 	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
 	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
 	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
+	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
 	{ 0 }
 };
 
@@ -1223,7 +1215,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
  * @rxq_index: the index of the Rx queue
  *
  * Converts rxq context from sparse to dense structure and then writes
- * it to HW register space
+ * it to HW register space and enables the hardware to prefetch descriptors
+ * instead of only fetching them on demand
  */
 enum ice_status
 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
@@ -1231,6 +1224,11 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
 {
 	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
 
+	if (!rlan_ctx)
+		return ICE_ERR_BAD_PTR;
+
+	rlan_ctx->prefena = 1;
+
 	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
 	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
 }
@@ -1623,7 +1621,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
 	struct ice_aq_desc desc;
 	enum ice_status status;
 
-	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_req_res");
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
 	cmd_resp = &desc.params.res_owner;
@@ -1692,7 +1690,7 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
 	struct ice_aqc_req_res *cmd;
 	struct ice_aq_desc desc;
 
-	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_release_res");
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
 	cmd = &desc.params.res_owner;
@@ -1722,7 +1720,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
 	u32 time_left = timeout;
 	enum ice_status status;
 
-	ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_res");
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
 	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
@@ -1780,7 +1778,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
 	enum ice_status status;
 	u32 total_delay = 0;
 
-	ice_debug(hw, ICE_DBG_TRACE, "ice_release_res");
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
 	status = ice_aq_release_res(hw, res, 0, NULL);
@@ -1814,7 +1812,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
 	struct ice_aqc_alloc_free_res_cmd *cmd;
 	struct ice_aq_desc desc;
 
-	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_alloc_free_res");
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
 	cmd = &desc.params.sw_res_ctrl;
@@ -1948,6 +1946,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 	struct ice_hw_func_caps *func_p = NULL;
 	struct ice_hw_dev_caps *dev_p = NULL;
 	struct ice_hw_common_caps *caps;
+	char const *prefix;
 	u32 i;
 
 	if (!buf)
@@ -1958,9 +1957,11 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 	if (opc == ice_aqc_opc_list_dev_caps) {
 		dev_p = &hw->dev_caps;
 		caps = &dev_p->common_cap;
+		prefix = "dev cap";
 	} else if (opc == ice_aqc_opc_list_func_caps) {
 		func_p = &hw->func_caps;
 		caps = &func_p->common_cap;
+		prefix = "func cap";
 	} else {
 		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
 		return;
@@ -1976,21 +1977,25 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 		case ICE_AQC_CAPS_VALID_FUNCTIONS:
 			caps->valid_functions = number;
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Valid Functions = %d\n",
+				  "%s: valid functions = %d\n", prefix,
 				  caps->valid_functions);
 			break;
 		case ICE_AQC_CAPS_VSI:
 			if (dev_p) {
 				dev_p->num_vsi_allocd_to_host = number;
 				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: Dev.VSI cnt = %d\n",
+					  "%s: num VSI alloc to host = %d\n",
+					  prefix,
 					  dev_p->num_vsi_allocd_to_host);
 			} else if (func_p) {
 				func_p->guar_num_vsi =
 					ice_get_num_per_func(hw, ICE_MAX_VSI);
 				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: Func.VSI cnt = %d\n",
-					  number);
+					  "%s: num guaranteed VSI (fw) = %d\n",
+					  prefix, number);
+				ice_debug(hw, ICE_DBG_INIT,
+					  "%s: num guaranteed VSI = %d\n",
+					  prefix, func_p->guar_num_vsi);
 			}
 			break;
 		case ICE_AQC_CAPS_DCB:
@@ -1998,49 +2003,51 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 			caps->active_tc_bitmap = logical_id;
 			caps->maxtc = phys_id;
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: DCB = %d\n", caps->dcb);
+				  "%s: DCB = %d\n", prefix, caps->dcb);
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Active TC bitmap = %d\n",
+				  "%s: active TC bitmap = %d\n", prefix,
 				  caps->active_tc_bitmap);
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: TC Max = %d\n", caps->maxtc);
+				  "%s: TC max = %d\n", prefix, caps->maxtc);
 			break;
 		case ICE_AQC_CAPS_RSS:
 			caps->rss_table_size = number;
 			caps->rss_table_entry_width = logical_id;
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: RSS table size = %d\n",
+				  "%s: RSS table size = %d\n", prefix,
 				  caps->rss_table_size);
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: RSS table width = %d\n",
+				  "%s: RSS table width = %d\n", prefix,
 				  caps->rss_table_entry_width);
 			break;
 		case ICE_AQC_CAPS_RXQS:
 			caps->num_rxq = number;
 			caps->rxq_first_id = phys_id;
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
+				  "%s: num Rx queues = %d\n", prefix,
+				  caps->num_rxq);
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Rx first queue ID = %d\n",
+				  "%s: Rx first queue ID = %d\n", prefix,
 				  caps->rxq_first_id);
 			break;
 		case ICE_AQC_CAPS_TXQS:
 			caps->num_txq = number;
 			caps->txq_first_id = phys_id;
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
+				  "%s: num Tx queues = %d\n", prefix,
+				  caps->num_txq);
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Tx first queue ID = %d\n",
+				  "%s: Tx first queue ID = %d\n", prefix,
 				  caps->txq_first_id);
 			break;
 		case ICE_AQC_CAPS_MSIX:
 			caps->num_msix_vectors = number;
 			caps->msix_vector_first_id = phys_id;
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: MSIX vector count = %d\n",
+				  "%s: MSIX vector count = %d\n", prefix,
 				  caps->num_msix_vectors);
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: MSIX first vector index = %d\n",
+				  "%s: MSIX first vector index = %d\n", prefix,
 				  caps->msix_vector_first_id);
 			break;
 		case ICE_AQC_CAPS_FD:
@@ -2050,7 +2057,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 			if (dev_p) {
 				dev_p->num_flow_director_fltr = number;
 				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: Dev.fd_fltr =%d\n",
+					  "%s: num FD filters = %d\n", prefix,
 					  dev_p->num_flow_director_fltr);
 			}
 			if (func_p) {
@@ -2063,32 +2070,38 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
 					      GLQF_FD_SIZE_FD_BSIZE_S;
 				func_p->fd_fltr_best_effort = val;
 				ice_debug(hw, ICE_DBG_INIT,
-					  "HW:func.fd_fltr guar= %d\n",
-					  func_p->fd_fltr_guar);
+					  "%s: num guaranteed FD filters = %d\n",
+					  prefix, func_p->fd_fltr_guar);
 				ice_debug(hw, ICE_DBG_INIT,
-					  "HW:func.fd_fltr best effort=%d\n",
-					  func_p->fd_fltr_best_effort);
+					  "%s: num best effort FD filters = %d\n",
+					  prefix, func_p->fd_fltr_best_effort);
 			}
 			break;
 		}
 		case ICE_AQC_CAPS_MAX_MTU:
 			caps->max_mtu = number;
-			if (dev_p)
-				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: Dev.MaxMTU = %d\n",
-					  caps->max_mtu);
-			else if (func_p)
-				ice_debug(hw, ICE_DBG_INIT,
-					  "HW caps: func.MaxMTU = %d\n",
-					  caps->max_mtu);
+			ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n",
+				  prefix, caps->max_mtu);
 			break;
 		default:
 			ice_debug(hw, ICE_DBG_INIT,
-				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
-				  cap);
+				  "%s: unknown capability[%d]: 0x%x\n", prefix,
+				  i, cap);
 			break;
 		}
 	}
+
+	/* Re-calculate capabilities that are dependent on the number of
+	 * physical ports; i.e. some features are not supported or function
+	 * differently on devices with more than 4 ports.
+	 */
+	if (caps && (ice_hweight32(caps->valid_functions) > 4)) {
+		/* Max 4 TCs per port */
+		caps->maxtc = 4;
+		ice_debug(hw, ICE_DBG_INIT,
+			  "%s: TC max = %d (based on #ports)\n", prefix,
+			  caps->maxtc);
+	}
 }
 
 /**
@@ -2401,10 +2414,10 @@ void
 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
 		    u16 link_speeds_bitmap)
 {
-	u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
 	u64 pt_high;
 	u64 pt_low;
 	int index;
+	u16 speed;
 
 	/* We first check with low part of phy_type */
 	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
@@ -2485,38 +2498,38 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
  */
 enum ice_status ice_update_link_info(struct ice_port_info *pi)
 {
-	struct ice_aqc_get_phy_caps_data *pcaps;
-	struct ice_phy_info *phy_info;
+	struct ice_link_status *li;
 	enum ice_status status;
-	struct ice_hw *hw;
 
 	if (!pi)
 		return ICE_ERR_PARAM;
 
-	hw = pi->hw;
-
-	pcaps = (struct ice_aqc_get_phy_caps_data *)
-		ice_malloc(hw, sizeof(*pcaps));
-	if (!pcaps)
-		return ICE_ERR_NO_MEMORY;
+	li = &pi->phy.link_info;
 
-	phy_info = &pi->phy;
 	status = ice_aq_get_link_info(pi, true, NULL, NULL);
 	if (status)
-		goto out;
+		return status;
+
+	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
+		struct ice_aqc_get_phy_caps_data *pcaps;
+		struct ice_hw *hw;
 
-	if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
-		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+		hw = pi->hw;
+		pcaps = (struct ice_aqc_get_phy_caps_data *)
+			ice_malloc(hw, sizeof(*pcaps));
+		if (!pcaps)
+			return ICE_ERR_NO_MEMORY;
+
+		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
 					     pcaps, NULL);
-		if (status)
-			goto out;
+		if (status == ICE_SUCCESS)
+			ice_memcpy(li->module_type, &pcaps->module_type,
+				   sizeof(li->module_type),
+				   ICE_NONDMA_TO_NONDMA);
 
-		ice_memcpy(phy_info->link_info.module_type, &pcaps->module_type,
-			   sizeof(phy_info->link_info.module_type),
-			   ICE_NONDMA_TO_NONDMA);
+		ice_free(hw, pcaps);
 	}
-out:
-	ice_free(hw, pcaps);
+
 	return status;
 }
 
@@ -2696,27 +2709,24 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
 {
 	switch (fec) {
 	case ICE_FEC_BASER:
-		/* Clear auto FEC and RS bits, and AND BASE-R ability
+		/* Clear RS bits, and AND BASE-R ability
 		 * bits and OR request bits.
 		 */
-		cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
 				     ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
 				     ICE_AQC_PHY_FEC_25G_KR_REQ;
 		break;
 	case ICE_FEC_RS:
-		/* Clear auto FEC and BASE-R bits, and AND RS ability
+		/* Clear BASE-R bits, and AND RS ability
 		 * bits and OR request bits.
 		 */
-		cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
 		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
 		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
 				     ICE_AQC_PHY_FEC_25G_RS_544_REQ;
 		break;
 	case ICE_FEC_NONE:
-		/* Clear auto FEC and all FEC option bits. */
-		cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
+		/* Clear all FEC option bits. */
 		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
 		break;
 	case ICE_FEC_AUTO:
@@ -3106,7 +3116,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
 	struct ice_aqc_add_txqs *cmd;
 	struct ice_aq_desc desc;
 
-	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_lan_txq");
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 
 	cmd = &desc.params.add_txqs;
@@ -3162,7 +3172,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
 	enum ice_status status;
 	u16 i, sz = 0;
 
-	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_dis_lan_txq");
+	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 	cmd = &desc.params.dis_txqs;
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
@@ -3816,7 +3826,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
 		return ICE_ERR_CFG;
 
-
 	if (!num_queues) {
 		/* if queue is disabled already yet the disable queue command
 		 * has to be sent to complete the VF reset, then call
@@ -3995,40 +4004,44 @@ void ice_replay_post(struct ice_hw *hw)
 
 /**
  * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
- * @hireg: high 32 bit HW register to read from
- * @loreg: low 32 bit HW register to read from
+ * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
 void
-ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
-		  bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+		  u64 *prev_stat, u64 *cur_stat)
 {
-	u64 new_data;
-
-	new_data = rd32(hw, loreg);
-	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
 
 	/* device stats are not reset at PFR, they likely will not be zeroed
-	 * when the driver starts. So save the first values read and use them as
-	 * offsets to be subtracted from the raw values in order to report stats
-	 * that count from zero.
+	 * when the driver starts. Thus, save the value from the first read
+	 * without adding to the statistic value so that we report stats which
+	 * count up from zero.
 	 */
-	if (!prev_stat_loaded)
+	if (!prev_stat_loaded) {
 		*prev_stat = new_data;
+		return;
+	}
+
+	/* Calculate the difference between the new and old values, and then
+	 * add it to the software stat value.
+	 */
 	if (new_data >= *prev_stat)
-		*cur_stat = new_data - *prev_stat;
+		*cur_stat += new_data - *prev_stat;
 	else
 		/* to manage the potential roll-over */
-		*cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
-	*cur_stat &= 0xFFFFFFFFFFULL;
+		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
+
+	/* Update the previously stored value to prepare for next read */
+	*prev_stat = new_data;
 }
 
 /**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
- * @reg: HW register to read from
+ * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
@@ -4042,17 +4055,26 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
 	new_data = rd32(hw, reg);
 
 	/* device stats are not reset at PFR, they likely will not be zeroed
-	 * when the driver starts. So save the first values read and use them as
-	 * offsets to be subtracted from the raw values in order to report stats
-	 * that count from zero.
+	 * when the driver starts. Thus, save the value from the first read
+	 * without adding to the statistic value so that we report stats which
+	 * count up from zero.
 	 */
-	if (!prev_stat_loaded)
+	if (!prev_stat_loaded) {
 		*prev_stat = new_data;
+		return;
+	}
+
+	/* Calculate the difference between the new and old values, and then
+	 * add it to the software stat value.
+	 */
 	if (new_data >= *prev_stat)
-		*cur_stat = new_data - *prev_stat;
+		*cur_stat += new_data - *prev_stat;
 	else
 		/* to manage the potential roll-over */
-		*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
+		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
+
+	/* Update the previously stored value to prepare for next read */
+	*prev_stat = new_data;
 }
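
Note (not part of the diff above): the reworked ice_stat_update40()/ice_stat_update32() hunks switch from reporting an offset-corrected raw counter value to accumulating the delta between successive reads. The standalone sketch below illustrates that accumulation and the 40-bit roll-over handling under the same arithmetic; the function and variable names here are illustrative only and are not taken from the ice code.

#include <inttypes.h>
#include <stdio.h>

/* Illustrative only: accumulate the delta between successive reads of a
 * free-running 40-bit hardware counter, handling wrap-around the same way
 * the reworked ice_stat_update40() comments describe.
 */
static void
stat_update40(uint64_t new_data, int prev_loaded,
	      uint64_t *prev_stat, uint64_t *cur_stat)
{
	new_data &= (1ULL << 40) - 1;	/* the counter is only 40 bits wide */

	if (!prev_loaded) {
		/* First read after load: remember the baseline, add nothing */
		*prev_stat = new_data;
		return;
	}

	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* counter rolled over between the two reads */
		*cur_stat += (new_data + (1ULL << 40)) - *prev_stat;

	/* remember this read so the next call computes the next delta */
	*prev_stat = new_data;
}

int main(void)
{
	uint64_t prev = 0, cur = 0;

	stat_update40(0xFFFFFFFF00ULL, 0, &prev, &cur);	/* baseline read */
	stat_update40(0x0000000010ULL, 1, &prev, &cur);	/* wrapped read */
	printf("accumulated = 0x%" PRIx64 "\n", cur);	/* prints 0x110 */
	return 0;
}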