1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019 Intel Corporation
5 #include "ice_common.h"
7 #include "ice_adminq_cmd.h"
10 #include "ice_switch.h"
12 #define ICE_PF_RESET_WAIT_COUNT 300
15 * ice_set_mac_type - Sets MAC type
16 * @hw: pointer to the HW structure
18 * This function sets the MAC type of the adapter based on the
19 * vendor ID and device ID stored in the HW structure.
21 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
23 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
25 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
26 return ICE_ERR_DEVICE_NOT_SUPPORTED;
28 switch (hw->device_id) {
29 case ICE_DEV_ID_E810C_BACKPLANE:
30 case ICE_DEV_ID_E810C_QSFP:
31 case ICE_DEV_ID_E810C_SFP:
32 case ICE_DEV_ID_E810_XXV_BACKPLANE:
33 case ICE_DEV_ID_E810_XXV_QSFP:
34 case ICE_DEV_ID_E810_XXV_SFP:
35 hw->mac_type = ICE_MAC_E810;
37 case ICE_DEV_ID_E822C_10G_BASE_T:
38 case ICE_DEV_ID_E822C_BACKPLANE:
39 case ICE_DEV_ID_E822C_QSFP:
40 case ICE_DEV_ID_E822C_SFP:
41 case ICE_DEV_ID_E822C_SGMII:
42 case ICE_DEV_ID_E822L_10G_BASE_T:
43 case ICE_DEV_ID_E822L_BACKPLANE:
44 case ICE_DEV_ID_E822L_SFP:
45 case ICE_DEV_ID_E822L_SGMII:
46 hw->mac_type = ICE_MAC_GENERIC;
49 hw->mac_type = ICE_MAC_UNKNOWN;
53 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
58 * ice_clear_pf_cfg - Clear PF configuration
59 * @hw: pointer to the hardware structure
61 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
62 * configuration, flow director filters, etc.).
64 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
66 struct ice_aq_desc desc;
68 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
70 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
74 * ice_aq_manage_mac_read - manage MAC address read command
75 * @hw: pointer to the HW struct
76 * @buf: a virtual buffer to hold the manage MAC read response
77 * @buf_size: Size of the virtual buffer
78 * @cd: pointer to command details structure or NULL
80 * This function is used to return per PF station MAC address (0x0107).
81 * NOTE: Upon successful completion of this command, MAC address information
82 * is returned in the user-specified buffer. Interpret the user-specified
83 * buffer as a "manage_mac_read" response.
84 * Responses such as the various MAC addresses are stored in the HW struct
85 * (port.mac). ice_aq_discover_caps is expected to be called before this function.
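 *
 * Illustrative call (hypothetical caller; a port can report up to two
 * addresses, LAN and WoL, so the buffer is sized for two response entries,
 * matching what ice_init_hw() does below):
 *
 *	struct ice_aqc_manage_mac_read_resp mac_buf[2];
 *
 *	status = ice_aq_manage_mac_read(hw, mac_buf, sizeof(mac_buf), NULL);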
87 static enum ice_status
88 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
91 struct ice_aqc_manage_mac_read_resp *resp;
92 struct ice_aqc_manage_mac_read *cmd;
93 struct ice_aq_desc desc;
94 enum ice_status status;
98 cmd = &desc.params.mac_read;
100 if (buf_size < sizeof(*resp))
101 return ICE_ERR_BUF_TOO_SHORT;
103 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
105 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
109 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
110 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
112 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
113 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
117 /* A single port can report up to two (LAN and WoL) addresses */
118 for (i = 0; i < cmd->num_addr; i++)
119 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
120 ice_memcpy(hw->port_info->mac.lan_addr,
121 resp[i].mac_addr, ETH_ALEN,
123 ice_memcpy(hw->port_info->mac.perm_addr,
125 ETH_ALEN, ICE_DMA_TO_NONDMA);
132 * ice_aq_get_phy_caps - returns PHY capabilities
133 * @pi: port information structure
134 * @qual_mods: report qualified modules
135 * @report_mode: report mode capabilities
136 * @pcaps: structure for PHY capabilities to be filled
137 * @cd: pointer to command details structure or NULL
139 * Returns the various PHY capabilities supported on the Port (0x0600)
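 *
 * Illustrative call (hypothetical caller; mirrors the call made later in
 * ice_init_hw() to fill port_info with topology capabilities):
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
 *				     &pcaps, NULL);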
142 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
143 struct ice_aqc_get_phy_caps_data *pcaps,
144 struct ice_sq_cd *cd)
146 struct ice_aqc_get_phy_caps *cmd;
147 u16 pcaps_size = sizeof(*pcaps);
148 struct ice_aq_desc desc;
149 enum ice_status status;
151 cmd = &desc.params.get_phy;
153 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
154 return ICE_ERR_PARAM;
156 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
159 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
161 cmd->param0 |= CPU_TO_LE16(report_mode);
162 status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
164 if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
165 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
166 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
173 * ice_aq_get_link_topo_handle - get link topology node return status
174 * @pi: port information structure
175 * @node_type: requested node type
176 * @cd: pointer to command details structure or NULL
178 * Get link topology node return status for specified node type (0x06E0)
180 * The cage node type can be used to determine whether a cage is present. If
181 * the AQC returns an error (ENOENT), then no cage is present. If no cage is
182 * present, the connection type is backplane or BASE-T.
184 static enum ice_status
185 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
186 struct ice_sq_cd *cd)
188 struct ice_aqc_get_link_topo *cmd;
189 struct ice_aq_desc desc;
191 cmd = &desc.params.get_link_topo;
193 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
195 cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
196 ICE_AQC_LINK_TOPO_NODE_CTX_S);
199 cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
201 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
205 * ice_is_media_cage_present
206 * @pi: port information structure
208 * Returns true if media cage is present, else false. If no cage, then
209 * media type is backplane or BASE-T.
211 static bool ice_is_media_cage_present(struct ice_port_info *pi)
213 /* The cage node type can be used to determine whether a cage is present.
214 * If the AQC returns an error (ENOENT), then no cage is present. If no cage
215 * is present, the connection type is backplane or BASE-T.
217 return !ice_aq_get_link_topo_handle(pi,
218 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
223 * ice_get_media_type - Gets media type
224 * @pi: port information structure
226 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
228 struct ice_link_status *hw_link_info;
231 return ICE_MEDIA_UNKNOWN;
233 hw_link_info = &pi->phy.link_info;
234 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
235 /* If more than one media type is selected, report unknown */
236 return ICE_MEDIA_UNKNOWN;
238 if (hw_link_info->phy_type_low) {
239 switch (hw_link_info->phy_type_low) {
240 case ICE_PHY_TYPE_LOW_1000BASE_SX:
241 case ICE_PHY_TYPE_LOW_1000BASE_LX:
242 case ICE_PHY_TYPE_LOW_10GBASE_SR:
243 case ICE_PHY_TYPE_LOW_10GBASE_LR:
244 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
245 case ICE_PHY_TYPE_LOW_25GBASE_SR:
246 case ICE_PHY_TYPE_LOW_25GBASE_LR:
247 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
248 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
249 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
250 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
251 case ICE_PHY_TYPE_LOW_50GBASE_SR:
252 case ICE_PHY_TYPE_LOW_50GBASE_FR:
253 case ICE_PHY_TYPE_LOW_50GBASE_LR:
254 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
255 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
256 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
257 case ICE_PHY_TYPE_LOW_100GBASE_DR:
258 return ICE_MEDIA_FIBER;
259 case ICE_PHY_TYPE_LOW_100BASE_TX:
260 case ICE_PHY_TYPE_LOW_1000BASE_T:
261 case ICE_PHY_TYPE_LOW_2500BASE_T:
262 case ICE_PHY_TYPE_LOW_5GBASE_T:
263 case ICE_PHY_TYPE_LOW_10GBASE_T:
264 case ICE_PHY_TYPE_LOW_25GBASE_T:
265 return ICE_MEDIA_BASET;
266 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
267 case ICE_PHY_TYPE_LOW_25GBASE_CR:
268 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
269 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
270 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
271 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
272 case ICE_PHY_TYPE_LOW_50GBASE_CP:
273 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
274 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
275 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
277 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
278 case ICE_PHY_TYPE_LOW_40G_XLAUI:
279 case ICE_PHY_TYPE_LOW_50G_LAUI2:
280 case ICE_PHY_TYPE_LOW_50G_AUI2:
281 case ICE_PHY_TYPE_LOW_50G_AUI1:
282 case ICE_PHY_TYPE_LOW_100G_AUI4:
283 case ICE_PHY_TYPE_LOW_100G_CAUI4:
284 if (ice_is_media_cage_present(pi))
287 case ICE_PHY_TYPE_LOW_1000BASE_KX:
288 case ICE_PHY_TYPE_LOW_2500BASE_KX:
289 case ICE_PHY_TYPE_LOW_2500BASE_X:
290 case ICE_PHY_TYPE_LOW_5GBASE_KR:
291 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
292 case ICE_PHY_TYPE_LOW_25GBASE_KR:
293 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
294 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
295 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
296 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
297 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
298 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
299 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
300 return ICE_MEDIA_BACKPLANE;
303 switch (hw_link_info->phy_type_high) {
304 case ICE_PHY_TYPE_HIGH_100G_AUI2:
305 if (ice_is_media_cage_present(pi))
308 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
309 return ICE_MEDIA_BACKPLANE;
312 return ICE_MEDIA_UNKNOWN;
316 * ice_aq_get_link_info
317 * @pi: port information structure
318 * @ena_lse: enable/disable LinkStatusEvent reporting
319 * @link: pointer to link status structure - optional
320 * @cd: pointer to command details structure or NULL
322 * Get Link Status (0x607). Returns the link status of the adapter.
325 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
326 struct ice_link_status *link, struct ice_sq_cd *cd)
328 struct ice_aqc_get_link_status_data link_data = { 0 };
329 struct ice_aqc_get_link_status *resp;
330 struct ice_link_status *li_old, *li;
331 enum ice_media_type *hw_media_type;
332 struct ice_fc_info *hw_fc_info;
333 bool tx_pause, rx_pause;
334 struct ice_aq_desc desc;
335 enum ice_status status;
340 return ICE_ERR_PARAM;
342 li_old = &pi->phy.link_info_old;
343 hw_media_type = &pi->phy.media_type;
344 li = &pi->phy.link_info;
345 hw_fc_info = &pi->fc;
347 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
348 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
349 resp = &desc.params.get_link_status;
350 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
351 resp->lport_num = pi->lport;
353 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
355 if (status != ICE_SUCCESS)
358 /* save off old link status information */
361 /* update current link status information */
362 li->link_speed = LE16_TO_CPU(link_data.link_speed);
363 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
364 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
365 *hw_media_type = ice_get_media_type(pi);
366 li->link_info = link_data.link_info;
367 li->an_info = link_data.an_info;
368 li->ext_info = link_data.ext_info;
369 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
370 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
371 li->topo_media_conflict = link_data.topo_media_conflict;
372 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
373 ICE_AQ_CFG_PACING_TYPE_M);
376 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
377 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
378 if (tx_pause && rx_pause)
379 hw_fc_info->current_mode = ICE_FC_FULL;
381 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
383 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
385 hw_fc_info->current_mode = ICE_FC_NONE;
387 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
389 ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
390 ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
391 (unsigned long long)li->phy_type_low);
392 ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
393 (unsigned long long)li->phy_type_high);
394 ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
395 ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
396 ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
397 ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
398 ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
399 ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
400 ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
402 /* save link status information */
406 /* flag cleared so calling functions don't call AQ again */
407 pi->phy.get_link_info = false;
414 * @hw: pointer to the HW struct
415 * @max_frame_size: Maximum Frame Size to be supported
416 * @cd: pointer to command details structure or NULL
418 * Set MAC configuration (0x0603)
421 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
423 u16 fc_threshold_val, tx_timer_val;
424 struct ice_aqc_set_mac_cfg *cmd;
425 struct ice_aq_desc desc;
428 cmd = &desc.params.set_mac_cfg;
430 if (max_frame_size == 0)
431 return ICE_ERR_PARAM;
433 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
435 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
437 /* We read back the transmit timer and fc threshold value of
438 * LFC. Thus, we will use index =
439 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
441 * Also, because we are operating on the transmit timer and fc
442 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
444 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
446 /* Retrieve the transmit timer */
448 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
449 tx_timer_val = reg_val &
450 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
451 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
453 /* Retrieve the fc threshold */
455 PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
456 fc_threshold_val = reg_val & MAKEMASK(0xFFFF, 0);
457 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_threshold_val);
459 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
463 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
464 * @hw: pointer to the HW struct
466 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
468 struct ice_switch_info *sw;
470 hw->switch_info = (struct ice_switch_info *)
471 ice_malloc(hw, sizeof(*hw->switch_info));
472 sw = hw->switch_info;
475 return ICE_ERR_NO_MEMORY;
477 INIT_LIST_HEAD(&sw->vsi_list_map_head);
479 return ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
483 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
484 * @hw: pointer to the HW struct
486 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
488 struct ice_switch_info *sw = hw->switch_info;
489 struct ice_vsi_list_map_info *v_pos_map;
490 struct ice_vsi_list_map_info *v_tmp_map;
491 struct ice_sw_recipe *recps;
494 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
495 ice_vsi_list_map_info, list_entry) {
496 LIST_DEL(&v_pos_map->list_entry);
497 ice_free(hw, v_pos_map);
499 recps = hw->switch_info->recp_list;
500 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
501 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
503 recps[i].root_rid = i;
504 LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
505 &recps[i].rg_list, ice_recp_grp_entry,
507 LIST_DEL(&rg_entry->l_entry);
508 ice_free(hw, rg_entry);
511 if (recps[i].adv_rule) {
512 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
513 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
515 ice_destroy_lock(&recps[i].filt_rule_lock);
516 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
517 &recps[i].filt_rules,
518 ice_adv_fltr_mgmt_list_entry,
520 LIST_DEL(&lst_itr->list_entry);
521 ice_free(hw, lst_itr->lkups);
522 ice_free(hw, lst_itr);
525 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
527 ice_destroy_lock(&recps[i].filt_rule_lock);
528 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
529 &recps[i].filt_rules,
530 ice_fltr_mgmt_list_entry,
532 LIST_DEL(&lst_itr->list_entry);
533 ice_free(hw, lst_itr);
536 if (recps[i].root_buf)
537 ice_free(hw, recps[i].root_buf);
539 ice_rm_all_sw_replay_rule_info(hw);
540 ice_free(hw, sw->recp_list);
545 * ice_get_itr_intrl_gran
546 * @hw: pointer to the HW struct
548 * Determines the ITR/INTRL granularities based on the maximum aggregate
549 * bandwidth according to the device's configuration during power-on.
551 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
553 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
554 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
555 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
557 switch (max_agg_bw) {
558 case ICE_MAX_AGG_BW_200G:
559 case ICE_MAX_AGG_BW_100G:
560 case ICE_MAX_AGG_BW_50G:
561 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
562 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
564 case ICE_MAX_AGG_BW_25G:
565 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
566 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
572 * ice_print_rollback_msg - print FW rollback message
573 * @hw: pointer to the hardware structure
575 void ice_print_rollback_msg(struct ice_hw *hw)
577 char nvm_str[ICE_NVM_VER_LEN] = { 0 };
578 struct ice_nvm_info *nvm = &hw->nvm;
579 struct ice_orom_info *orom;
583 SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
584 nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
585 orom->build, orom->patch);
587 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
588 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
592 * ice_init_hw - main hardware initialization routine
593 * @hw: pointer to the hardware structure
595 enum ice_status ice_init_hw(struct ice_hw *hw)
597 struct ice_aqc_get_phy_caps_data *pcaps;
598 enum ice_status status;
602 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
604 /* Set MAC type based on DeviceID */
605 status = ice_set_mac_type(hw);
609 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
610 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
611 PF_FUNC_RID_FUNCTION_NUMBER_S;
613 status = ice_reset(hw, ICE_RESET_PFR);
617 ice_get_itr_intrl_gran(hw);
619 status = ice_create_all_ctrlq(hw);
621 goto err_unroll_cqinit;
623 status = ice_init_nvm(hw);
625 goto err_unroll_cqinit;
627 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
628 ice_print_rollback_msg(hw);
630 status = ice_clear_pf_cfg(hw);
632 goto err_unroll_cqinit;
634 /* Set bit to enable Flow Director filters */
635 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
636 INIT_LIST_HEAD(&hw->fdir_list_head);
638 ice_clear_pxe_mode(hw);
640 status = ice_get_caps(hw);
642 goto err_unroll_cqinit;
644 hw->port_info = (struct ice_port_info *)
645 ice_malloc(hw, sizeof(*hw->port_info));
646 if (!hw->port_info) {
647 status = ICE_ERR_NO_MEMORY;
648 goto err_unroll_cqinit;
651 /* set the back pointer to HW */
652 hw->port_info->hw = hw;
654 /* Initialize port_info struct with switch configuration data */
655 status = ice_get_initial_sw_cfg(hw);
657 goto err_unroll_alloc;
660 /* Query the allocated resources for Tx scheduler */
661 status = ice_sched_query_res_alloc(hw);
663 ice_debug(hw, ICE_DBG_SCHED,
664 "Failed to get scheduler allocated resources\n");
665 goto err_unroll_alloc;
667 ice_sched_get_psm_clk_freq(hw);
669 /* Initialize port_info struct with scheduler data */
670 status = ice_sched_init_port(hw->port_info);
672 goto err_unroll_sched;
674 pcaps = (struct ice_aqc_get_phy_caps_data *)
675 ice_malloc(hw, sizeof(*pcaps));
677 status = ICE_ERR_NO_MEMORY;
678 goto err_unroll_sched;
681 /* Initialize port_info struct with PHY capabilities */
682 status = ice_aq_get_phy_caps(hw->port_info, false,
683 ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
686 goto err_unroll_sched;
688 /* Initialize port_info struct with link information */
689 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
691 goto err_unroll_sched;
692 /* need a valid SW entry point to build a Tx tree */
693 if (!hw->sw_entry_point_layer) {
694 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
695 status = ICE_ERR_CFG;
696 goto err_unroll_sched;
698 INIT_LIST_HEAD(&hw->agg_list);
699 /* Initialize max burst size */
700 if (!hw->max_burst_size)
701 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
703 status = ice_init_fltr_mgmt_struct(hw);
705 goto err_unroll_sched;
707 /* Get MAC information */
708 /* A single port can report up to two (LAN and WoL) addresses */
709 mac_buf = ice_calloc(hw, 2,
710 sizeof(struct ice_aqc_manage_mac_read_resp));
711 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
714 status = ICE_ERR_NO_MEMORY;
715 goto err_unroll_fltr_mgmt_struct;
718 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
719 ice_free(hw, mac_buf);
722 goto err_unroll_fltr_mgmt_struct;
723 /* Obtain counter base index which would be used by flow director */
724 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
726 goto err_unroll_fltr_mgmt_struct;
727 status = ice_init_hw_tbls(hw);
729 goto err_unroll_fltr_mgmt_struct;
732 err_unroll_fltr_mgmt_struct:
733 ice_cleanup_fltr_mgmt_struct(hw);
735 ice_sched_cleanup_all(hw);
737 ice_free(hw, hw->port_info);
738 hw->port_info = NULL;
740 ice_destroy_all_ctrlq(hw);
745 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
746 * @hw: pointer to the hardware structure
748 * This should be called only during nominal operation, not as a result of
749 * ice_init_hw() failing, since ice_init_hw() itself takes care of unrolling
750 * any applicable initializations if it fails for any reason.
752 void ice_deinit_hw(struct ice_hw *hw)
754 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
755 ice_cleanup_fltr_mgmt_struct(hw);
757 ice_sched_cleanup_all(hw);
758 ice_sched_clear_agg(hw);
760 ice_free_hw_tbls(hw);
763 ice_free(hw, hw->port_info);
764 hw->port_info = NULL;
767 ice_destroy_all_ctrlq(hw);
769 /* Clear VSI contexts if not already cleared */
770 ice_clear_all_vsi_ctx(hw);
774 * ice_check_reset - Check to see if a global reset is complete
775 * @hw: pointer to the hardware structure
777 enum ice_status ice_check_reset(struct ice_hw *hw)
779 u32 cnt, reg = 0, grst_delay, uld_mask;
781 /* Poll for Device Active state in case a recent CORER, GLOBR,
782 * or EMPR has occurred. The grst delay value is in 100ms units.
783 * Add 1sec for outstanding AQ commands that can take a long time.
785 grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
786 GLGEN_RSTCTL_GRSTDEL_S) + 10;
788 for (cnt = 0; cnt < grst_delay; cnt++) {
789 ice_msec_delay(100, true);
790 reg = rd32(hw, GLGEN_RSTAT);
791 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
795 if (cnt == grst_delay) {
796 ice_debug(hw, ICE_DBG_INIT,
797 "Global reset polling failed to complete.\n");
798 return ICE_ERR_RESET_FAILED;
801 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
802 GLNVM_ULD_PCIER_DONE_1_M |\
803 GLNVM_ULD_CORER_DONE_M |\
804 GLNVM_ULD_GLOBR_DONE_M |\
805 GLNVM_ULD_POR_DONE_M |\
806 GLNVM_ULD_POR_DONE_1_M |\
807 GLNVM_ULD_PCIER_DONE_2_M)
809 uld_mask = ICE_RESET_DONE_MASK;
811 /* Device is Active; check Global Reset processes are done */
812 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
813 reg = rd32(hw, GLNVM_ULD) & uld_mask;
814 if (reg == uld_mask) {
815 ice_debug(hw, ICE_DBG_INIT,
816 "Global reset processes done. %d\n", cnt);
819 ice_msec_delay(10, true);
822 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
823 ice_debug(hw, ICE_DBG_INIT,
824 "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
826 return ICE_ERR_RESET_FAILED;
833 * ice_pf_reset - Reset the PF
834 * @hw: pointer to the hardware structure
836 * If a global reset has been triggered, this function checks
837 * for its completion and then issues the PF reset.
839 static enum ice_status ice_pf_reset(struct ice_hw *hw)
843 /* If at function entry a global reset was already in progress, i.e.
844 * state is not 'device active' or any of the reset done bits are not
845 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
846 * global reset is done.
848 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
849 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
850 /* poll on global reset currently in progress until done */
851 if (ice_check_reset(hw))
852 return ICE_ERR_RESET_FAILED;
858 reg = rd32(hw, PFGEN_CTRL);
860 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
862 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
863 reg = rd32(hw, PFGEN_CTRL);
864 if (!(reg & PFGEN_CTRL_PFSWR_M))
867 ice_msec_delay(1, true);
870 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
871 ice_debug(hw, ICE_DBG_INIT,
872 "PF reset polling failed to complete.\n");
873 return ICE_ERR_RESET_FAILED;
880 * ice_reset - Perform different types of reset
881 * @hw: pointer to the hardware structure
882 * @req: reset request
884 * This function triggers a reset as specified by the req parameter.
887 * If anything other than a PF reset is triggered, PXE mode is restored.
888 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
889 * interface has been restored in the rebuild flow.
891 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
897 return ice_pf_reset(hw);
898 case ICE_RESET_CORER:
899 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
900 val = GLGEN_RTRIG_CORER_M;
902 case ICE_RESET_GLOBR:
903 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
904 val = GLGEN_RTRIG_GLOBR_M;
907 return ICE_ERR_PARAM;
910 val |= rd32(hw, GLGEN_RTRIG);
911 wr32(hw, GLGEN_RTRIG, val);
914 /* wait for the FW to be ready */
915 return ice_check_reset(hw);
919 * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
920 * @hw: pointer to hardware structure
921 * @module_tlv: pointer to module TLV to return
922 * @module_tlv_len: pointer to module TLV length to return
923 * @module_type: module type requested
925 * Finds the requested sub module TLV type from the Preserved Field
926 * Area (PFA) and returns the TLV pointer and length. The caller can
927 * use these to read the variable length TLV value.
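 *
 * Illustrative use (hypothetical caller; the TLV value words follow the type
 * and length words, so they start at module_tlv + 2):
 *
 *	u16 tlv, tlv_len, word;
 *
 *	if (!ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, module_type))
 *		status = ice_read_sr_word(hw, tlv + 2, &word);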
930 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
933 enum ice_status status;
934 u16 pfa_len, pfa_ptr;
937 status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
938 if (status != ICE_SUCCESS) {
939 ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n");
942 status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
943 if (status != ICE_SUCCESS) {
944 ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
947 /* Starting with first TLV after PFA length, iterate through the list
948 * of TLVs to find the requested one.
950 next_tlv = pfa_ptr + 1;
951 while (next_tlv < pfa_ptr + pfa_len) {
952 u16 tlv_sub_module_type;
956 status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
957 if (status != ICE_SUCCESS) {
958 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
961 /* Read TLV length */
962 status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
963 if (status != ICE_SUCCESS) {
964 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
967 if (tlv_sub_module_type == module_type) {
969 *module_tlv = next_tlv;
970 *module_tlv_len = tlv_len;
973 return ICE_ERR_INVAL_SIZE;
975 /* Check next TLV, i.e. current TLV pointer + length + 2 words
976 * (for current TLV's type and length)
978 next_tlv = next_tlv + tlv_len + 2;
980 /* Module does not exist */
981 return ICE_ERR_DOES_NOT_EXIST;
985 * ice_copy_rxq_ctx_to_hw
986 * @hw: pointer to the hardware structure
987 * @ice_rxq_ctx: pointer to the rxq context
988 * @rxq_index: the index of the Rx queue
990 * Copies rxq context from dense structure to HW register space
992 static enum ice_status
993 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
998 return ICE_ERR_BAD_PTR;
1000 if (rxq_index > QRX_CTRL_MAX_INDEX)
1001 return ICE_ERR_PARAM;
1003 /* Copy each dword separately to HW */
1004 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1005 wr32(hw, QRX_CONTEXT(i, rxq_index),
1006 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1008 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1009 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1015 /* LAN Rx Queue Context */
1016 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1017 /* Field Width LSB */
1018 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1019 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1020 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1021 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1022 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1023 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1024 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1025 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1026 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1027 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1028 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1029 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1030 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1031 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1032 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1033 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1034 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1035 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1036 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1037 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1043 * @hw: pointer to the hardware structure
1044 * @rlan_ctx: pointer to the rxq context
1045 * @rxq_index: the index of the Rx queue
1047 * Converts rxq context from sparse to dense structure and then writes
1048 * it to HW register space and enables the hardware to prefetch descriptors
1049 * instead of only fetching them on demand
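 *
 * Minimal illustrative usage (hypothetical caller; the field values are
 * placeholders, a real caller fills in the complete queue configuration):
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_base;
 *	rlan_ctx.qlen = ring_len;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);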
1052 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1055 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1058 return ICE_ERR_BAD_PTR;
1060 rlan_ctx->prefena = 1;
1062 ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1063 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1068 * @hw: pointer to the hardware structure
1069 * @rxq_index: the index of the Rx queue to clear
1071 * Clears rxq context in HW register space
1073 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1077 if (rxq_index > QRX_CTRL_MAX_INDEX)
1078 return ICE_ERR_PARAM;
1080 /* Clear each dword register separately */
1081 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1082 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1087 /* LAN Tx Queue Context */
1088 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1089 /* Field Width LSB */
1090 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1091 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1092 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1093 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1094 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1095 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1096 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1097 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1098 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1099 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1100 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1101 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1102 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1103 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1104 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1105 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1106 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1107 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1108 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1109 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1110 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1111 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1112 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1113 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1114 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1115 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1116 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1117 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1122 * ice_copy_tx_cmpltnq_ctx_to_hw
1123 * @hw: pointer to the hardware structure
1124 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1125 * @tx_cmpltnq_index: the index of the completion queue
1127 * Copies Tx completion queue context from dense structure to HW register space
1129 static enum ice_status
1130 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1131 u32 tx_cmpltnq_index)
1135 if (!ice_tx_cmpltnq_ctx)
1136 return ICE_ERR_BAD_PTR;
1138 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1139 return ICE_ERR_PARAM;
1141 /* Copy each dword separately to HW */
1142 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1143 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1144 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1146 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1147 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1153 /* LAN Tx Completion Queue Context */
1154 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1155 /* Field Width LSB */
1156 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1157 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1158 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1159 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1160 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1161 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1162 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1163 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1164 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1165 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1170 * ice_write_tx_cmpltnq_ctx
1171 * @hw: pointer to the hardware structure
1172 * @tx_cmpltnq_ctx: pointer to the completion queue context
1173 * @tx_cmpltnq_index: the index of the completion queue
1175 * Converts completion queue context from sparse to dense structure and then
1176 * writes it to HW register space
1179 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1180 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1181 u32 tx_cmpltnq_index)
1183 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1185 ice_set_ctx((u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1186 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1190 * ice_clear_tx_cmpltnq_ctx
1191 * @hw: pointer to the hardware structure
1192 * @tx_cmpltnq_index: the index of the completion queue to clear
1194 * Clears Tx completion queue context in HW register space
1197 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1201 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1202 return ICE_ERR_PARAM;
1204 /* Clear each dword register separately */
1205 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1206 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1212 * ice_copy_tx_drbell_q_ctx_to_hw
1213 * @hw: pointer to the hardware structure
1214 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1215 * @tx_drbell_q_index: the index of the doorbell queue
1217 * Copies doorbell queue context from dense structure to HW register space
1219 static enum ice_status
1220 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1221 u32 tx_drbell_q_index)
1225 if (!ice_tx_drbell_q_ctx)
1226 return ICE_ERR_BAD_PTR;
1228 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1229 return ICE_ERR_PARAM;
1231 /* Copy each dword separately to HW */
1232 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1233 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1234 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1236 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1237 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1243 /* LAN Tx Doorbell Queue Context info */
1244 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1245 /* Field Width LSB */
1246 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1247 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1248 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1249 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1250 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1251 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1252 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1253 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1254 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1255 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1256 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1261 * ice_write_tx_drbell_q_ctx
1262 * @hw: pointer to the hardware structure
1263 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1264 * @tx_drbell_q_index: the index of the doorbell queue
1266 * Converts doorbell queue context from sparse to dense structure and then
1267 * writes it to HW register space
1270 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1271 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1272 u32 tx_drbell_q_index)
1274 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1276 ice_set_ctx((u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info);
1277 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1281 * ice_clear_tx_drbell_q_ctx
1282 * @hw: pointer to the hardware structure
1283 * @tx_drbell_q_index: the index of the doorbell queue to clear
1285 * Clears doorbell queue context in HW register space
1288 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1292 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1293 return ICE_ERR_PARAM;
1295 /* Clear each dword register separately */
1296 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1297 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1302 /* FW Admin Queue command wrappers */
1305 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1306 * @hw: pointer to the HW struct
1307 * @desc: descriptor describing the command
1308 * @buf: buffer to use for indirect commands (NULL for direct commands)
1309 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1310 * @cd: pointer to command details structure
1312 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1315 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1316 u16 buf_size, struct ice_sq_cd *cd)
1318 return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1323 * @hw: pointer to the HW struct
1324 * @cd: pointer to command details structure or NULL
1326 * Get the firmware version (0x0001) from the admin queue commands
1328 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1330 struct ice_aqc_get_ver *resp;
1331 struct ice_aq_desc desc;
1332 enum ice_status status;
1334 resp = &desc.params.get_ver;
1336 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1338 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1341 hw->fw_branch = resp->fw_branch;
1342 hw->fw_maj_ver = resp->fw_major;
1343 hw->fw_min_ver = resp->fw_minor;
1344 hw->fw_patch = resp->fw_patch;
1345 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1346 hw->api_branch = resp->api_branch;
1347 hw->api_maj_ver = resp->api_major;
1348 hw->api_min_ver = resp->api_minor;
1349 hw->api_patch = resp->api_patch;
1356 * ice_aq_send_driver_ver
1357 * @hw: pointer to the HW struct
1358 * @dv: driver's major, minor version
1359 * @cd: pointer to command details structure or NULL
1361 * Send the driver version (0x0002) to the firmware
1364 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1365 struct ice_sq_cd *cd)
1367 struct ice_aqc_driver_ver *cmd;
1368 struct ice_aq_desc desc;
1371 cmd = &desc.params.driver_ver;
1374 return ICE_ERR_PARAM;
1376 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1378 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1379 cmd->major_ver = dv->major_ver;
1380 cmd->minor_ver = dv->minor_ver;
1381 cmd->build_ver = dv->build_ver;
1382 cmd->subbuild_ver = dv->subbuild_ver;
1385 while (len < sizeof(dv->driver_string) &&
1386 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1389 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1394 * @hw: pointer to the HW struct
1395 * @unloading: is the driver unloading itself
1397 * Tell the Firmware that we're shutting down the AdminQ and whether
1398 * or not the driver is unloading as well (0x0003).
1400 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1402 struct ice_aqc_q_shutdown *cmd;
1403 struct ice_aq_desc desc;
1405 cmd = &desc.params.q_shutdown;
1407 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1410 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1412 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1417 * @hw: pointer to the HW struct
1419 * @access: access type
1420 * @sdp_number: resource number
1421 * @timeout: the maximum time in ms that the driver may hold the resource
1422 * @cd: pointer to command details structure or NULL
1424 * Requests common resource using the admin queue commands (0x0008).
1425 * When attempting to acquire the Global Config Lock, the driver can
1426 * learn of three states:
1427 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1428 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1429 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1430 * successfully downloaded the package; the driver does
1431 * not have to download the package and can continue
1434 * Note that if the caller is in an acquire-lock, perform-action, release-lock
1435 * phase of operation, it is possible that the FW may detect a timeout and issue
1436 * a CORER. In this case, the driver will receive a CORER interrupt and will
1437 * have to determine its cause. The calling thread that is handling this flow
1438 * will likely get an error propagated back to it indicating the Download
1439 * Package, Update Package or the Release Resource AQ commands timed out.
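 *
 * A typical caller pattern (sketch only; built on the ice_acquire_res() and
 * ice_release_res() wrappers defined later in this file):
 *
 *	status = ice_acquire_res(hw, res_id, ICE_RES_WRITE, timeout);
 *	if (!status) {
 *		... perform the protected action ...
 *		ice_release_res(hw, res_id);
 *	}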
1441 static enum ice_status
1442 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1443 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1444 struct ice_sq_cd *cd)
1446 struct ice_aqc_req_res *cmd_resp;
1447 struct ice_aq_desc desc;
1448 enum ice_status status;
1450 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1452 cmd_resp = &desc.params.res_owner;
1454 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1456 cmd_resp->res_id = CPU_TO_LE16(res);
1457 cmd_resp->access_type = CPU_TO_LE16(access);
1458 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1459 cmd_resp->timeout = CPU_TO_LE32(*timeout);
1462 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1464 /* The completion specifies the maximum time in ms that the driver
1465 * may hold the resource in the Timeout field.
1468 /* Global config lock response utilizes an additional status field.
1470 * If the Global config lock resource is held by some other driver, the
1471 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1472 * and the timeout field indicates the maximum time the current owner
1473 * of the resource has to free it.
1475 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1476 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1477 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1479 } else if (LE16_TO_CPU(cmd_resp->status) ==
1480 ICE_AQ_RES_GLBL_IN_PROG) {
1481 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1482 return ICE_ERR_AQ_ERROR;
1483 } else if (LE16_TO_CPU(cmd_resp->status) ==
1484 ICE_AQ_RES_GLBL_DONE) {
1485 return ICE_ERR_AQ_NO_WORK;
1488 /* invalid FW response, force a timeout immediately */
1490 return ICE_ERR_AQ_ERROR;
1493 /* If the resource is held by some other driver, the command completes
1494 * with a busy return value and the timeout field indicates the maximum
1495 * time the current owner of the resource has to free it.
1497 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1498 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1504 * ice_aq_release_res
1505 * @hw: pointer to the HW struct
1507 * @sdp_number: resource number
1508 * @cd: pointer to command details structure or NULL
1510 * release common resource using the admin queue commands (0x0009)
1512 static enum ice_status
1513 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1514 struct ice_sq_cd *cd)
1516 struct ice_aqc_req_res *cmd;
1517 struct ice_aq_desc desc;
1519 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1521 cmd = &desc.params.res_owner;
1523 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1525 cmd->res_id = CPU_TO_LE16(res);
1526 cmd->res_number = CPU_TO_LE32(sdp_number);
1528 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1533 * @hw: pointer to the HW structure
1535 * @access: access type (read or write)
1536 * @timeout: timeout in milliseconds
1538 * This function will attempt to acquire the ownership of a resource.
1541 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1542 enum ice_aq_res_access_type access, u32 timeout)
1544 #define ICE_RES_POLLING_DELAY_MS 10
1545 u32 delay = ICE_RES_POLLING_DELAY_MS;
1546 u32 time_left = timeout;
1547 enum ice_status status;
1549 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1551 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1553 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1554 * previously acquired the resource and performed any necessary updates;
1555 * in this case the caller does not obtain the resource and has no
1556 * further work to do.
1558 if (status == ICE_ERR_AQ_NO_WORK)
1559 goto ice_acquire_res_exit;
1562 ice_debug(hw, ICE_DBG_RES,
1563 "resource %d acquire type %d failed.\n", res, access);
1565 /* If necessary, poll until the current lock owner times out */
1566 timeout = time_left;
1567 while (status && timeout && time_left) {
1568 ice_msec_delay(delay, true);
1569 timeout = (timeout > delay) ? timeout - delay : 0;
1570 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1572 if (status == ICE_ERR_AQ_NO_WORK)
1573 /* lock free, but no work to do */
1580 if (status && status != ICE_ERR_AQ_NO_WORK)
1581 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1583 ice_acquire_res_exit:
1584 if (status == ICE_ERR_AQ_NO_WORK) {
1585 if (access == ICE_RES_WRITE)
1586 ice_debug(hw, ICE_DBG_RES,
1587 "resource indicates no work to do.\n");
1589 ice_debug(hw, ICE_DBG_RES,
1590 "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1597 * @hw: pointer to the HW structure
1600 * This function will release a resource using the proper Admin Command.
1602 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1604 enum ice_status status;
1605 u32 total_delay = 0;
1607 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1609 status = ice_aq_release_res(hw, res, 0, NULL);
1611 /* there are some rare cases when trying to release the resource
1612 * results in an admin queue timeout, so handle them correctly
1614 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1615 (total_delay < hw->adminq.sq_cmd_timeout)) {
1616 ice_msec_delay(1, true);
1617 status = ice_aq_release_res(hw, res, 0, NULL);
1623 * ice_aq_alloc_free_res - command to allocate/free resources
1624 * @hw: pointer to the HW struct
1625 * @num_entries: number of resource entries in buffer
1626 * @buf: Indirect buffer to hold data parameters and response
1627 * @buf_size: size of buffer for indirect commands
1628 * @opc: pass in the command opcode
1629 * @cd: pointer to command details structure or NULL
1631 * Helper function to allocate/free resources using the admin queue commands
1634 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1635 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1636 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1638 struct ice_aqc_alloc_free_res_cmd *cmd;
1639 struct ice_aq_desc desc;
1641 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1643 cmd = &desc.params.sw_res_ctrl;
1646 return ICE_ERR_PARAM;
1648 if (buf_size < (num_entries * sizeof(buf->elem[0])))
1649 return ICE_ERR_PARAM;
1651 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1653 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1655 cmd->num_entries = CPU_TO_LE16(num_entries);
1657 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1661 * ice_alloc_hw_res - allocate resource
1662 * @hw: pointer to the HW struct
1663 * @type: type of resource
1664 * @num: number of resources to allocate
1665 * @btm: allocate from bottom
1666 * @res: pointer to array that will receive the resources
1669 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1671 struct ice_aqc_alloc_free_res_elem *buf;
1672 enum ice_status status;
1675 buf_len = ice_struct_size(buf, elem, num - 1);
1676 buf = (struct ice_aqc_alloc_free_res_elem *)
1677 ice_malloc(hw, buf_len);
1679 return ICE_ERR_NO_MEMORY;
1681 /* Prepare buffer to allocate resource. */
1682 buf->num_elems = CPU_TO_LE16(num);
1683 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1684 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1686 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1688 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1689 ice_aqc_opc_alloc_res, NULL);
1691 goto ice_alloc_res_exit;
1693 ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
1694 ICE_NONDMA_TO_NONDMA);
1702 * ice_free_hw_res - free allocated HW resource
1703 * @hw: pointer to the HW struct
1704 * @type: type of resource to free
1705 * @num: number of resources
1706 * @res: pointer to array that contains the resources to free
1709 ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1711 struct ice_aqc_alloc_free_res_elem *buf;
1712 enum ice_status status;
1715 buf_len = ice_struct_size(buf, elem, num - 1);
1716 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1718 return ICE_ERR_NO_MEMORY;
1720 /* Prepare buffer to free resource. */
1721 buf->num_elems = CPU_TO_LE16(num);
1722 buf->res_type = CPU_TO_LE16(type);
1723 ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
1724 ICE_NONDMA_TO_NONDMA);
1726 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1727 ice_aqc_opc_free_res, NULL);
1729 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1736 * ice_get_num_per_func - determine number of resources per PF
1737 * @hw: pointer to the HW structure
1738 * @max: value to be evenly split between each PF
1740 * Determine the number of valid functions by going through the bitmap returned
1741 * from parsing capabilities and use this to calculate the number of resources
1742 * per PF based on the max value passed in.
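 *
 * For example, if the capability bitmap reports 8 valid PFs and max = 768,
 * each PF would be assigned 768 / 8 = 96 resources.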
1744 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1748 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1749 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
1750 ICE_CAPS_VALID_FUNCS_M);
1759 * ice_parse_caps - parse function/device capabilities
1760 * @hw: pointer to the HW struct
1761 * @buf: pointer to a buffer containing function/device capability records
1762 * @cap_count: number of capability records in the list
1763 * @opc: type of capabilities list to parse
1765 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1768 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1769 enum ice_adminq_opc opc)
1771 struct ice_aqc_list_caps_elem *cap_resp;
1772 struct ice_hw_func_caps *func_p = NULL;
1773 struct ice_hw_dev_caps *dev_p = NULL;
1774 struct ice_hw_common_caps *caps;
1781 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1783 if (opc == ice_aqc_opc_list_dev_caps) {
1784 dev_p = &hw->dev_caps;
1785 caps = &dev_p->common_cap;
1787 } else if (opc == ice_aqc_opc_list_func_caps) {
1788 func_p = &hw->func_caps;
1789 caps = &func_p->common_cap;
1790 prefix = "func cap";
1792 ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1796 for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1797 u32 logical_id = LE32_TO_CPU(cap_resp->logical_id);
1798 u32 phys_id = LE32_TO_CPU(cap_resp->phys_id);
1799 u32 number = LE32_TO_CPU(cap_resp->number);
1800 u16 cap = LE16_TO_CPU(cap_resp->cap);
1803 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1804 caps->valid_functions = number;
1805 ice_debug(hw, ICE_DBG_INIT,
1806 "%s: valid_functions (bitmap) = %d\n", prefix,
1807 caps->valid_functions);
1809 /* store func count for resource management purposes */
1811 dev_p->num_funcs = ice_hweight32(number);
1813 case ICE_AQC_CAPS_VSI:
1815 dev_p->num_vsi_allocd_to_host = number;
1816 ice_debug(hw, ICE_DBG_INIT,
1817 "%s: num_vsi_allocd_to_host = %d\n",
1819 dev_p->num_vsi_allocd_to_host);
1820 } else if (func_p) {
1821 func_p->guar_num_vsi =
1822 ice_get_num_per_func(hw, ICE_MAX_VSI);
1823 ice_debug(hw, ICE_DBG_INIT,
1824 "%s: guar_num_vsi (fw) = %d\n",
1826 ice_debug(hw, ICE_DBG_INIT,
1827 "%s: guar_num_vsi = %d\n",
1828 prefix, func_p->guar_num_vsi);
1831 case ICE_AQC_CAPS_DCB:
1832 caps->dcb = (number == 1);
1833 caps->active_tc_bitmap = logical_id;
1834 caps->maxtc = phys_id;
1835 ice_debug(hw, ICE_DBG_INIT,
1836 "%s: dcb = %d\n", prefix, caps->dcb);
1837 ice_debug(hw, ICE_DBG_INIT,
1838 "%s: active_tc_bitmap = %d\n", prefix,
1839 caps->active_tc_bitmap);
1840 ice_debug(hw, ICE_DBG_INIT,
1841 "%s: maxtc = %d\n", prefix, caps->maxtc);
1843 case ICE_AQC_CAPS_RSS:
1844 caps->rss_table_size = number;
1845 caps->rss_table_entry_width = logical_id;
1846 ice_debug(hw, ICE_DBG_INIT,
1847 "%s: rss_table_size = %d\n", prefix,
1848 caps->rss_table_size);
1849 ice_debug(hw, ICE_DBG_INIT,
1850 "%s: rss_table_entry_width = %d\n", prefix,
1851 caps->rss_table_entry_width);
1853 case ICE_AQC_CAPS_RXQS:
1854 caps->num_rxq = number;
1855 caps->rxq_first_id = phys_id;
1856 ice_debug(hw, ICE_DBG_INIT,
1857 "%s: num_rxq = %d\n", prefix,
1859 ice_debug(hw, ICE_DBG_INIT,
1860 "%s: rxq_first_id = %d\n", prefix,
1861 caps->rxq_first_id);
1863 case ICE_AQC_CAPS_TXQS:
1864 caps->num_txq = number;
1865 caps->txq_first_id = phys_id;
1866 ice_debug(hw, ICE_DBG_INIT,
1867 "%s: num_txq = %d\n", prefix,
1869 ice_debug(hw, ICE_DBG_INIT,
1870 "%s: txq_first_id = %d\n", prefix,
1871 caps->txq_first_id);
1873 case ICE_AQC_CAPS_MSIX:
1874 caps->num_msix_vectors = number;
1875 caps->msix_vector_first_id = phys_id;
1876 ice_debug(hw, ICE_DBG_INIT,
1877 "%s: num_msix_vectors = %d\n", prefix,
1878 caps->num_msix_vectors);
1879 ice_debug(hw, ICE_DBG_INIT,
1880 "%s: msix_vector_first_id = %d\n", prefix,
1881 caps->msix_vector_first_id);
1883 case ICE_AQC_CAPS_FD:
1888 dev_p->num_flow_director_fltr = number;
1889 ice_debug(hw, ICE_DBG_INIT,
1890 "%s: num_flow_director_fltr = %d\n",
1892 dev_p->num_flow_director_fltr);
1895 reg_val = rd32(hw, GLQF_FD_SIZE);
1896 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
1897 GLQF_FD_SIZE_FD_GSIZE_S;
1898 func_p->fd_fltr_guar =
1899 ice_get_num_per_func(hw, val);
1900 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
1901 GLQF_FD_SIZE_FD_BSIZE_S;
1902 func_p->fd_fltr_best_effort = val;
1903 ice_debug(hw, ICE_DBG_INIT,
1904 "%s: fd_fltr_guar = %d\n",
1905 prefix, func_p->fd_fltr_guar);
1906 ice_debug(hw, ICE_DBG_INIT,
1907 "%s: fd_fltr_best_effort = %d\n",
1908 prefix, func_p->fd_fltr_best_effort);
1912 case ICE_AQC_CAPS_MAX_MTU:
1913 caps->max_mtu = number;
1914 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1915 prefix, caps->max_mtu);
1918 ice_debug(hw, ICE_DBG_INIT,
1919 "%s: unknown capability[%d]: 0x%x\n", prefix,
1925 /* Re-calculate capabilities that are dependent on the number of
1926 * physical ports; i.e. some features are not supported or function
1927 * differently on devices with more than 4 ports.
1929 if (hw->dev_caps.num_funcs > 4) {
1930 /* Max 4 TCs per port */
1932 ice_debug(hw, ICE_DBG_INIT,
1933 "%s: maxtc = %d (based on #ports)\n", prefix,
1939 * ice_aq_discover_caps - query function/device capabilities
1940 * @hw: pointer to the HW struct
1941 * @buf: a virtual buffer to hold the capabilities
1942 * @buf_size: Size of the virtual buffer
1943 * @cap_count: cap count needed if AQ err==ENOMEM
1944 * @opc: capabilities type to discover - pass in the command opcode
1945 * @cd: pointer to command details structure or NULL
1947 * Get the function(0x000a)/device(0x000b) capabilities description from the firmware.
1950 static enum ice_status
1951 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
1952 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1954 struct ice_aqc_list_caps *cmd;
1955 struct ice_aq_desc desc;
1956 enum ice_status status;
1958 cmd = &desc.params.get_cap;
1960 if (opc != ice_aqc_opc_list_func_caps &&
1961 opc != ice_aqc_opc_list_dev_caps)
1962 return ICE_ERR_PARAM;
1964 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1966 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1968 ice_parse_caps(hw, buf, LE32_TO_CPU(cmd->count), opc);
1969 else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
1970 *cap_count = LE32_TO_CPU(cmd->count);
1975 * ice_discover_caps - get info about the HW
1976 * @hw: pointer to the hardware structure
1977 * @opc: capabilities type to discover - pass in the command opcode
1979 static enum ice_status
1980 ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
1982 enum ice_status status;
1987 /* The driver doesn't know how many capabilities the device will return
1988 * so the buffer size required isn't known ahead of time. The driver
1989 * starts with cbuf_len and if this turns out to be insufficient, the
1990 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
1991 * The driver then allocates the buffer based on the count and retries
1992 * the operation. So it follows that the retry count is 2.
1994 #define ICE_GET_CAP_BUF_COUNT 40
1995 #define ICE_GET_CAP_RETRY_COUNT 2
1997 cap_count = ICE_GET_CAP_BUF_COUNT;
1998 retries = ICE_GET_CAP_RETRY_COUNT;
2003 cbuf_len = (u16)(cap_count *
2004 sizeof(struct ice_aqc_list_caps_elem));
2005 cbuf = ice_malloc(hw, cbuf_len);
2007 return ICE_ERR_NO_MEMORY;
2009 status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
2013 if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
2016 /* If ENOMEM is returned, try again with bigger buffer */
2017 } while (--retries);
2023 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2024 * @hw: pointer to the hardware structure
2026 void ice_set_safe_mode_caps(struct ice_hw *hw)
2028 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2029 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2030 u32 valid_func, rxq_first_id, txq_first_id;
2031 u32 msix_vector_first_id, max_mtu;
2034 /* cache some func_caps values that should be restored after memset */
2035 valid_func = func_caps->common_cap.valid_functions;
2036 txq_first_id = func_caps->common_cap.txq_first_id;
2037 rxq_first_id = func_caps->common_cap.rxq_first_id;
2038 msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
2039 max_mtu = func_caps->common_cap.max_mtu;
2041 /* unset func capabilities */
2042 memset(func_caps, 0, sizeof(*func_caps));
2044 /* restore cached values */
2045 func_caps->common_cap.valid_functions = valid_func;
2046 func_caps->common_cap.txq_first_id = txq_first_id;
2047 func_caps->common_cap.rxq_first_id = rxq_first_id;
2048 func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2049 func_caps->common_cap.max_mtu = max_mtu;
2051 /* one Tx and one Rx queue in safe mode */
2052 func_caps->common_cap.num_rxq = 1;
2053 func_caps->common_cap.num_txq = 1;
2055 /* two MSIX vectors, one for traffic and one for misc causes */
2056 func_caps->common_cap.num_msix_vectors = 2;
2057 func_caps->guar_num_vsi = 1;
2059 /* cache some dev_caps values that should be restored after memset */
2060 valid_func = dev_caps->common_cap.valid_functions;
2061 txq_first_id = dev_caps->common_cap.txq_first_id;
2062 rxq_first_id = dev_caps->common_cap.rxq_first_id;
2063 msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
2064 max_mtu = dev_caps->common_cap.max_mtu;
2065 num_funcs = dev_caps->num_funcs;
2067 /* unset dev capabilities */
2068 memset(dev_caps, 0, sizeof(*dev_caps));
2070 /* restore cached values */
2071 dev_caps->common_cap.valid_functions = valid_func;
2072 dev_caps->common_cap.txq_first_id = txq_first_id;
2073 dev_caps->common_cap.rxq_first_id = rxq_first_id;
2074 dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2075 dev_caps->common_cap.max_mtu = max_mtu;
2076 dev_caps->num_funcs = num_funcs;
2078 /* one Tx and one Rx queue per function in safe mode */
2079 dev_caps->common_cap.num_rxq = num_funcs;
2080 dev_caps->common_cap.num_txq = num_funcs;
2082 /* two MSIX vectors per function */
2083 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2087 * ice_get_caps - get info about the HW
2088 * @hw: pointer to the hardware structure
2090 enum ice_status ice_get_caps(struct ice_hw *hw)
2092 enum ice_status status;
2094 status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
2096 status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
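/* Illustrative usage sketch (not part of the driver): a typical caller
 * discovers capabilities once during HW initialization and only then
 * relies on hw->dev_caps / hw->func_caps, e.g.:
 *
 *	enum ice_status status = ice_get_caps(hw);
 *
 *	if (status)
 *		return status;
 *	// hw->dev_caps and hw->func_caps are now populated
 */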
2102 * ice_aq_manage_mac_write - manage MAC address write command
2103 * @hw: pointer to the HW struct
2104 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2105 * @flags: flags to control write behavior
2106 * @cd: pointer to command details structure or NULL
2108 * This function is used to write MAC address to the NVM (0x0108).
2111 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2112 struct ice_sq_cd *cd)
2114 struct ice_aqc_manage_mac_write *cmd;
2115 struct ice_aq_desc desc;
2117 cmd = &desc.params.mac_write;
2118 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2122 /* Prep values for flags, sah, sal */
2123 cmd->sah = HTONS(*((const u16 *)mac_addr));
2124 cmd->sal = HTONL(*((const u32 *)(mac_addr + 2)));
2126 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2130 * ice_aq_clear_pxe_mode
2131 * @hw: pointer to the HW struct
2133 * Tell the firmware that the driver is taking over from PXE (0x0110).
2135 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2137 struct ice_aq_desc desc;
2139 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2140 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2142 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2146 * ice_clear_pxe_mode - clear PXE operations mode
2147 * @hw: pointer to the HW struct
2149 * Make sure all PXE mode settings are cleared, including things
2150 * like descriptor fetch/write-back mode.
2152 void ice_clear_pxe_mode(struct ice_hw *hw)
2154 if (ice_check_sq_alive(hw, &hw->adminq))
2155 ice_aq_clear_pxe_mode(hw);
2159 * ice_get_link_speed_based_on_phy_type - returns link speed
2160 * @phy_type_low: lower part of phy_type
2161 * @phy_type_high: higher part of phy_type
2163 * This helper function will convert an entry in PHY type structure
2164 * [phy_type_low, phy_type_high] to its corresponding link speed.
2165 * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
2166 * should be set, as this function converts one PHY type to its speed.
2168 * If no bit is set, ICE_LINK_SPEED_UNKNOWN is returned.
2169 * If more than one bit is set, ICE_LINK_SPEED_UNKNOWN is returned.
2172 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2174 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2175 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2177 switch (phy_type_low) {
2178 case ICE_PHY_TYPE_LOW_100BASE_TX:
2179 case ICE_PHY_TYPE_LOW_100M_SGMII:
2180 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2182 case ICE_PHY_TYPE_LOW_1000BASE_T:
2183 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2184 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2185 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2186 case ICE_PHY_TYPE_LOW_1G_SGMII:
2187 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2189 case ICE_PHY_TYPE_LOW_2500BASE_T:
2190 case ICE_PHY_TYPE_LOW_2500BASE_X:
2191 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2192 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2194 case ICE_PHY_TYPE_LOW_5GBASE_T:
2195 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2196 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2198 case ICE_PHY_TYPE_LOW_10GBASE_T:
2199 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2200 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2201 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2202 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2203 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2204 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2205 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2207 case ICE_PHY_TYPE_LOW_25GBASE_T:
2208 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2209 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2210 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2211 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2212 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2213 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2214 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2215 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2216 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2217 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2218 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2220 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2221 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2222 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2223 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2224 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2225 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2226 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2228 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2229 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2230 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2231 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2232 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2233 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2234 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2235 case ICE_PHY_TYPE_LOW_50G_AUI2:
2236 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2237 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2238 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2239 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2240 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2241 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2242 case ICE_PHY_TYPE_LOW_50G_AUI1:
2243 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2245 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2246 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2247 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2248 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2249 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2250 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2251 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2252 case ICE_PHY_TYPE_LOW_100G_AUI4:
2253 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2254 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2255 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2256 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2257 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2258 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2261 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2265 switch (phy_type_high) {
2266 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2267 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2268 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2269 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2270 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2271 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2274 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2278 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2279 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2280 return ICE_AQ_LINK_SPEED_UNKNOWN;
2281 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2282 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2283 return ICE_AQ_LINK_SPEED_UNKNOWN;
2284 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2285 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2286 return speed_phy_type_low;
2288 return speed_phy_type_high;
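/* Illustrative example: with exactly one PHY type bit set, the helper
 * above maps it to its AQ link speed, e.g.
 *
 *	u16 speed;
 *
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_CR,
 *						     0);
 *	// speed == ICE_AQ_LINK_SPEED_25GB
 *
 * Passing zero, or a value with more than one bit set overall, yields
 * ICE_AQ_LINK_SPEED_UNKNOWN as documented above.
 */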
2292 * ice_update_phy_type
2293 * @phy_type_low: pointer to the lower part of phy_type
2294 * @phy_type_high: pointer to the higher part of phy_type
2295 * @link_speeds_bitmap: targeted link speeds bitmap
2297 * Note: For the link_speeds_bitmap format, see
2298 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2299 * link_speeds_bitmap that includes multiple speeds.
2301 * Each entry in the [phy_type_low, phy_type_high] structure represents a
2302 * certain link speed. This helper function turns on the bits in
2303 * [phy_type_low, phy_type_high] that correspond to the value of the
2304 * link_speeds_bitmap input parameter.
2307 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2308 u16 link_speeds_bitmap)
2315 /* We first check with low part of phy_type */
2316 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2317 pt_low = BIT_ULL(index);
2318 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2320 if (link_speeds_bitmap & speed)
2321 *phy_type_low |= BIT_ULL(index);
2324 /* We then check with high part of phy_type */
2325 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2326 pt_high = BIT_ULL(index);
2327 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2329 if (link_speeds_bitmap & speed)
2330 *phy_type_high |= BIT_ULL(index);
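/* Illustrative usage sketch: build a PHY type pair that allows 10G and
 * 25G operation (assumes link_speeds_bitmap uses the ICE_AQ_LINK_SPEED_*
 * bit definitions referenced above):
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	// phy_low/phy_high now have the corresponding ICE_PHY_TYPE_* bits set
 */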
2335 * ice_aq_set_phy_cfg
2336 * @hw: pointer to the HW struct
2337 * @pi: port info structure of the interested logical port
2338 * @cfg: structure with PHY configuration data to be set
2339 * @cd: pointer to command details structure or NULL
2341 * Set the various PHY configuration parameters supported on the Port.
2342 * One or more of the Set PHY config parameters may be ignored in an MFP
2343 * mode as the PF may not have the privilege to set some of the PHY Config
2344 * parameters. This status will be indicated by the command response (0x0601).
2347 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2348 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2350 struct ice_aq_desc desc;
2351 enum ice_status status;
2354 return ICE_ERR_PARAM;
2356 /* Ensure that only valid bits of cfg->caps can be turned on. */
2357 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2358 ice_debug(hw, ICE_DBG_PHY,
2359 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2362 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2365 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2366 desc.params.set_phy.lport_num = pi->lport;
2367 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2369 ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
2370 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2371 ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
2372 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2373 ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
2374 ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl_an = 0x%x\n",
2375 cfg->low_power_ctrl_an);
2376 ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
2377 ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
2378 ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
2380 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2383 pi->phy.curr_user_phy_cfg = *cfg;
2389 * ice_update_link_info - update status of the HW network link
2390 * @pi: port info structure of the interested logical port
2392 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2394 struct ice_link_status *li;
2395 enum ice_status status;
2398 return ICE_ERR_PARAM;
2400 li = &pi->phy.link_info;
2402 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2406 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2407 struct ice_aqc_get_phy_caps_data *pcaps;
2411 pcaps = (struct ice_aqc_get_phy_caps_data *)
2412 ice_malloc(hw, sizeof(*pcaps));
2414 return ICE_ERR_NO_MEMORY;
2416 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2418 if (status == ICE_SUCCESS)
2419 ice_memcpy(li->module_type, &pcaps->module_type,
2420 sizeof(li->module_type),
2421 ICE_NONDMA_TO_NONDMA);
2423 ice_free(hw, pcaps);
2430 * ice_cache_phy_user_req
2431 * @pi: port information structure
2432 * @cache_data: PHY logging data
2433 * @cache_mode: PHY logging mode
2435 * Log the user request on (FC, FEC, SPEED) for later use.
2438 ice_cache_phy_user_req(struct ice_port_info *pi,
2439 struct ice_phy_cache_mode_data cache_data,
2440 enum ice_phy_cache_mode cache_mode)
2445 switch (cache_mode) {
2447 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2449 case ICE_SPEED_MODE:
2450 pi->phy.curr_user_speed_req =
2451 cache_data.data.curr_user_speed_req;
2454 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2462 * ice_caps_to_fc_mode
2463 * @caps: PHY capabilities
2465 * Convert PHY FC capabilities to ice FC mode
2467 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2469 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2470 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2473 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2474 return ICE_FC_TX_PAUSE;
2476 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2477 return ICE_FC_RX_PAUSE;
2483 * ice_caps_to_fec_mode
2484 * @caps: PHY capabilities
2485 * @fec_options: Link FEC options
2487 * Convert PHY FEC capabilities to ice FEC mode
2489 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2491 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2492 return ICE_FEC_AUTO;
2494 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2495 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2496 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2497 ICE_AQC_PHY_FEC_25G_KR_REQ))
2498 return ICE_FEC_BASER;
2500 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2501 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2502 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2505 return ICE_FEC_NONE;
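/* Illustrative usage sketch: both helpers above are typically fed from a
 * previously retrieved ice_aqc_get_phy_caps_data (pcaps assumed valid):
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
 *						     pcaps->link_fec_options);
 */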
2510 * @pi: port information structure
2511 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2512 * @ena_auto_link_update: enable automatic link update
2514 * Set the requested flow control mode.
2517 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2519 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2520 struct ice_phy_cache_mode_data cache_data;
2521 struct ice_aqc_get_phy_caps_data *pcaps;
2522 enum ice_status status;
2523 u8 pause_mask = 0x0;
2526 if (!pi || !aq_failures)
2527 return ICE_ERR_PARAM;
2530 *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
2532 /* Cache user FC request */
2533 cache_data.data.curr_user_fc_req = pi->fc.req_mode;
2534 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2536 pcaps = (struct ice_aqc_get_phy_caps_data *)
2537 ice_malloc(hw, sizeof(*pcaps));
2539 return ICE_ERR_NO_MEMORY;
2541 switch (pi->fc.req_mode) {
2543 /* Query the value of FC that both the NIC and the attached media can support. */
2546 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2549 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2553 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2554 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2557 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2558 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2560 case ICE_FC_RX_PAUSE:
2561 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2563 case ICE_FC_TX_PAUSE:
2564 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2570 /* Get the current PHY config */
2571 ice_memset(pcaps, 0, sizeof(*pcaps), ICE_NONDMA_MEM);
2572 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2575 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2579 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2581 /* clear the old pause settings */
2582 cfg.caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2583 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2585 /* set the new capabilities */
2586 cfg.caps |= pause_mask;
2588 /* If the capabilities have changed, then set the new config */
2589 if (cfg.caps != pcaps->caps) {
2590 int retry_count, retry_max = 10;
2592 /* Auto restart link so settings take effect */
2593 if (ena_auto_link_update)
2594 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2596 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2598 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2602 /* Update the link info
2603 * It sometimes takes a really long time for the link to
2604 * come back from the atomic reset. Thus, we wait a little while and try again.
2607 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2608 status = ice_update_link_info(pi);
2610 if (status == ICE_SUCCESS)
2613 ice_msec_delay(100, true);
2617 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2621 ice_free(hw, pcaps);
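/* Illustrative usage sketch: request full flow control and let the
 * helper restart the link so the setting takes effect (ICE_FC_FULL is
 * assumed here to be a valid req_mode value):
 *
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		// aq_failures indicates which step failed (GET/SET/UPDATE)
 */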
2626 * ice_phy_caps_equals_cfg
2627 * @phy_caps: PHY capabilities
2628 * @phy_cfg: PHY configuration
2630 * Helper function to determine if PHY capabilities match the PHY configuration.
2634 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2635 struct ice_aqc_set_phy_cfg_data *phy_cfg)
2637 u8 caps_mask, cfg_mask;
2639 if (!phy_caps || !phy_cfg)
2642 /* These bits are not common between capabilities and configuration.
2643 * Do not use them to determine equality.
2645 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2646 ICE_AQC_PHY_EN_MOD_QUAL);
2647 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2649 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2650 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2651 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2652 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2653 phy_caps->eee_cap != phy_cfg->eee_cap ||
2654 phy_caps->eeer_value != phy_cfg->eeer_value ||
2655 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2662 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2663 * @pi: port information structure
2664 * @caps: PHY ability structure to copy data from
2665 * @cfg: PHY configuration structure to copy data to
2667 * Helper function to copy AQC PHY get ability data to the PHY set configuration data structure.
2671 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
2672 struct ice_aqc_get_phy_caps_data *caps,
2673 struct ice_aqc_set_phy_cfg_data *cfg)
2675 if (!pi || !caps || !cfg)
2678 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
2679 cfg->phy_type_low = caps->phy_type_low;
2680 cfg->phy_type_high = caps->phy_type_high;
2681 cfg->caps = caps->caps;
2682 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
2683 cfg->eee_cap = caps->eee_cap;
2684 cfg->eeer_value = caps->eeer_value;
2685 cfg->link_fec_opt = caps->link_fec_options;
2686 cfg->module_compliance_enforcement =
2687 caps->module_compliance_enforcement;
2689 if (ice_fw_supports_link_override(pi->hw)) {
2690 struct ice_link_default_override_tlv tlv;
2692 if (ice_get_link_default_override(&tlv, pi))
2695 if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
2696 cfg->module_compliance_enforcement |=
2697 ICE_LINK_OVERRIDE_STRICT_MODE;
2702 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2703 * @pi: port information structure
2704 * @cfg: PHY configuration data to set FEC mode
2705 * @fec: FEC mode to configure
2708 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2709 enum ice_fec_mode fec)
2711 struct ice_aqc_get_phy_caps_data *pcaps;
2712 enum ice_status status = ICE_SUCCESS;
2716 return ICE_ERR_BAD_PTR;
2720 pcaps = (struct ice_aqc_get_phy_caps_data *)
2721 ice_malloc(hw, sizeof(*pcaps));
2723 return ICE_ERR_NO_MEMORY;
2725 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
2732 /* Clear RS bits, and AND BASE-R ability
2733 * bits and OR request bits.
2735 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2736 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2737 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2738 ICE_AQC_PHY_FEC_25G_KR_REQ;
2741 /* Clear BASE-R bits, and AND RS ability
2742 * bits and OR request bits.
2744 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2745 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2746 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2749 /* Clear all FEC option bits. */
2750 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2753 /* AND auto FEC bit, and all caps bits. */
2754 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
2755 cfg->link_fec_opt |= pcaps->link_fec_options;
2758 status = ICE_ERR_PARAM;
2762 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
2763 struct ice_link_default_override_tlv tlv;
2765 if (ice_get_link_default_override(&tlv, pi))
2768 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
2769 (tlv.options & ICE_LINK_OVERRIDE_EN))
2770 cfg->link_fec_opt = tlv.fec_options;
2774 ice_free(hw, pcaps);
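/* Illustrative sketch of a typical FEC reconfiguration flow built from
 * the helpers above (pcaps assumed to come from ice_aq_get_phy_caps();
 * error handling omitted):
 *
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	ice_cfg_phy_fec(pi, &cfg, ICE_FEC_BASER);
 *	ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */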
2780 * ice_get_link_status - get status of the HW network link
2781 * @pi: port information structure
2782 * @link_up: pointer to bool (true/false = linkup/linkdown)
2784 * Variable link_up is true if link is up, false if link is down.
2785 * The variable link_up is invalid if the returned status is non-zero. As a
2786 * result of this call, link status reporting becomes enabled.
2788 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2790 struct ice_phy_info *phy_info;
2791 enum ice_status status = ICE_SUCCESS;
2793 if (!pi || !link_up)
2794 return ICE_ERR_PARAM;
2796 phy_info = &pi->phy;
2798 if (phy_info->get_link_info) {
2799 status = ice_update_link_info(pi);
2802 ice_debug(pi->hw, ICE_DBG_LINK,
2803 "get link status error, status = %d\n",
2807 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
2813 * ice_aq_set_link_restart_an
2814 * @pi: pointer to the port information structure
2815 * @ena_link: if true: enable link, if false: disable link
2816 * @cd: pointer to command details structure or NULL
2818 * Sets up the link and restarts the Auto-Negotiation over the link.
2821 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2822 struct ice_sq_cd *cd)
2824 struct ice_aqc_restart_an *cmd;
2825 struct ice_aq_desc desc;
2827 cmd = &desc.params.restart_an;
2829 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2831 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2832 cmd->lport_num = pi->lport;
2834 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2836 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2838 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2842 * ice_aq_set_event_mask
2843 * @hw: pointer to the HW struct
2844 * @port_num: port number of the physical function
2845 * @mask: event mask to be set
2846 * @cd: pointer to command details structure or NULL
2848 * Set event mask (0x0613)
2851 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2852 struct ice_sq_cd *cd)
2854 struct ice_aqc_set_event_mask *cmd;
2855 struct ice_aq_desc desc;
2857 cmd = &desc.params.set_event_mask;
2859 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2861 cmd->lport_num = port_num;
2863 cmd->event_mask = CPU_TO_LE16(mask);
2864 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2868 * ice_aq_set_mac_loopback
2869 * @hw: pointer to the HW struct
2870 * @ena_lpbk: Enable or Disable loopback
2871 * @cd: pointer to command details structure or NULL
2873 * Enable/disable loopback on a given port
2876 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2878 struct ice_aqc_set_mac_lb *cmd;
2879 struct ice_aq_desc desc;
2881 cmd = &desc.params.set_mac_lb;
2883 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2885 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2887 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2891 * ice_aq_set_port_id_led
2892 * @pi: pointer to the port information
2893 * @is_orig_mode: is this LED set to original mode (by the net-list)
2894 * @cd: pointer to command details structure or NULL
2896 * Set LED value for the given port (0x06e9)
2899 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2900 struct ice_sq_cd *cd)
2902 struct ice_aqc_set_port_id_led *cmd;
2903 struct ice_hw *hw = pi->hw;
2904 struct ice_aq_desc desc;
2906 cmd = &desc.params.set_port_id_led;
2908 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2911 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2913 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2915 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2920 * @hw: pointer to the HW struct
2921 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
2922 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
2923 * @mem_addr: I2C offset: lower 8 bits for the address, upper 8 bits are zero padding
2925 * @set_page: set or ignore the page
2926 * @data: pointer to data buffer to be read/written to the I2C device.
2927 * @length: 1-16 for read, 1 for write.
2928 * @write: 0 for read, 1 for write
2929 * @cd: pointer to command details structure or NULL
2931 * Read/Write SFF EEPROM (0x06EE)
2934 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
2935 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
2936 bool write, struct ice_sq_cd *cd)
2938 struct ice_aqc_sff_eeprom *cmd;
2939 struct ice_aq_desc desc;
2940 enum ice_status status;
2942 if (!data || (mem_addr & 0xff00))
2943 return ICE_ERR_PARAM;
2945 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
2946 cmd = &desc.params.read_write_sff_param;
2947 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
2948 cmd->lport_num = (u8)(lport & 0xff);
2949 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
2950 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
2951 ICE_AQC_SFF_I2CBUS_7BIT_M) |
2953 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
2954 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
2955 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
2956 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
2958 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
2960 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
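/* Illustrative usage sketch: read the first 8 bytes of the module EEPROM
 * at bus address 0xA0, page 0. The zero lport below leaves the
 * logical-port-valid bit clear; treating that as "use the default port"
 * is an assumption, not a documented requirement.
 *
 *	u8 data[8];
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x0000, 0, 0, data,
 *				   sizeof(data), false, NULL);
 */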
2965 * __ice_aq_get_set_rss_lut
2966 * @hw: pointer to the hardware structure
2967 * @vsi_id: VSI FW index
2968 * @lut_type: LUT table type
2969 * @lut: pointer to the LUT buffer provided by the caller
2970 * @lut_size: size of the LUT buffer
2971 * @glob_lut_idx: global LUT index
2972 * @set: set true to set the table, false to get the table
2974 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
2976 static enum ice_status
2977 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2978 u16 lut_size, u8 glob_lut_idx, bool set)
2980 struct ice_aqc_get_set_rss_lut *cmd_resp;
2981 struct ice_aq_desc desc;
2982 enum ice_status status;
2985 cmd_resp = &desc.params.get_set_rss_lut;
2988 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
2989 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2991 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
2994 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
2995 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2996 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2997 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3000 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3001 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3002 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3003 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3004 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3007 status = ICE_ERR_PARAM;
3008 goto ice_aq_get_set_rss_lut_exit;
3011 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3012 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3013 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3016 goto ice_aq_get_set_rss_lut_send;
3017 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3019 goto ice_aq_get_set_rss_lut_send;
3021 goto ice_aq_get_set_rss_lut_send;
3024 /* LUT size is only valid for Global and PF table types */
3026 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3027 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3028 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3029 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3031 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3032 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3033 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3034 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3036 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3037 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3038 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3039 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3040 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3045 status = ICE_ERR_PARAM;
3046 goto ice_aq_get_set_rss_lut_exit;
3049 ice_aq_get_set_rss_lut_send:
3050 cmd_resp->flags = CPU_TO_LE16(flags);
3051 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3053 ice_aq_get_set_rss_lut_exit:
3058 * ice_aq_get_rss_lut
3059 * @hw: pointer to the hardware structure
3060 * @vsi_handle: software VSI handle
3061 * @lut_type: LUT table type
3062 * @lut: pointer to the LUT buffer provided by the caller
3063 * @lut_size: size of the LUT buffer
3065 * get the RSS lookup table, PF or VSI type
3068 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3069 u8 *lut, u16 lut_size)
3071 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3072 return ICE_ERR_PARAM;
3074 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3075 lut_type, lut, lut_size, 0, false);
3079 * ice_aq_set_rss_lut
3080 * @hw: pointer to the hardware structure
3081 * @vsi_handle: software VSI handle
3082 * @lut_type: LUT table type
3083 * @lut: pointer to the LUT buffer provided by the caller
3084 * @lut_size: size of the LUT buffer
3086 * set the RSS lookup table, PF or VSI type
3089 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3090 u8 *lut, u16 lut_size)
3092 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3093 return ICE_ERR_PARAM;
3095 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3096 lut_type, lut, lut_size, 0, true);
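/* Illustrative usage sketch: program a 128-entry VSI LUT that spreads
 * traffic over 4 queues (vsi_handle assumed valid; 128 matches the
 * ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 case handled above):
 *
 *	u8 lut[128];
 *	u16 i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % 4;
 *	status = ice_aq_set_rss_lut(hw, vsi_handle,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI,
 *				    lut, sizeof(lut));
 */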
3100 * __ice_aq_get_set_rss_key
3101 * @hw: pointer to the HW struct
3102 * @vsi_id: VSI FW index
3103 * @key: pointer to key info struct
3104 * @set: set true to set the key, false to get the key
3106 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3109 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3110 struct ice_aqc_get_set_rss_keys *key,
3113 struct ice_aqc_get_set_rss_key *cmd_resp;
3114 u16 key_size = sizeof(*key);
3115 struct ice_aq_desc desc;
3117 cmd_resp = &desc.params.get_set_rss_key;
3120 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3121 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3123 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3126 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3127 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3128 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3129 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3131 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3135 * ice_aq_get_rss_key
3136 * @hw: pointer to the HW struct
3137 * @vsi_handle: software VSI handle
3138 * @key: pointer to key info struct
3140 * get the RSS key per VSI
3143 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3144 struct ice_aqc_get_set_rss_keys *key)
3146 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3147 return ICE_ERR_PARAM;
3149 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3154 * ice_aq_set_rss_key
3155 * @hw: pointer to the HW struct
3156 * @vsi_handle: software VSI handle
3157 * @keys: pointer to key info struct
3159 * set the RSS key per VSI
3162 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3163 struct ice_aqc_get_set_rss_keys *keys)
3165 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3166 return ICE_ERR_PARAM;
3168 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3173 * ice_aq_add_lan_txq
3174 * @hw: pointer to the hardware structure
3175 * @num_qgrps: Number of added queue groups
3176 * @qg_list: list of queue groups to be added
3177 * @buf_size: size of buffer for indirect command
3178 * @cd: pointer to command details structure or NULL
3180 * Add Tx LAN queue (0x0C30)
3183 * Prior to calling add Tx LAN queue:
3184 * Initialize the following as part of the Tx queue context:
3185 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3186 * Cache profile and Packet shaper profile.
3188 * After the Add Tx LAN queue AQ command is completed:
3189 * Interrupts should be associated with specific queues.
3190 * Association of a Tx queue to a doorbell queue is not part of the Add LAN Tx queue AQ command.
3194 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3195 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3196 struct ice_sq_cd *cd)
3198 u16 i, sum_header_size, sum_q_size = 0;
3199 struct ice_aqc_add_tx_qgrp *list;
3200 struct ice_aqc_add_txqs *cmd;
3201 struct ice_aq_desc desc;
3203 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3205 cmd = &desc.params.add_txqs;
3207 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3210 return ICE_ERR_PARAM;
3212 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3213 return ICE_ERR_PARAM;
3215 sum_header_size = num_qgrps *
3216 (sizeof(*qg_list) - sizeof(*qg_list->txqs));
3219 for (i = 0; i < num_qgrps; i++) {
3220 struct ice_aqc_add_txqs_perq *q = list->txqs;
3222 sum_q_size += list->num_txqs * sizeof(*q);
3223 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
3226 if (buf_size != (sum_header_size + sum_q_size))
3227 return ICE_ERR_PARAM;
3229 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3231 cmd->num_qgrps = num_qgrps;
3233 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3237 * ice_aq_dis_lan_txq
3238 * @hw: pointer to the hardware structure
3239 * @num_qgrps: number of groups in the list
3240 * @qg_list: the list of groups to disable
3241 * @buf_size: the total size of the qg_list buffer in bytes
3242 * @rst_src: if called due to reset, specifies the reset source
3243 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3244 * @cd: pointer to command details structure or NULL
3246 * Disable LAN Tx queue (0x0C31)
3248 static enum ice_status
3249 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3250 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3251 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3252 struct ice_sq_cd *cd)
3254 struct ice_aqc_dis_txqs *cmd;
3255 struct ice_aq_desc desc;
3256 enum ice_status status;
3259 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3260 cmd = &desc.params.dis_txqs;
3261 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3263 /* qg_list can be NULL only in VM/VF reset flow */
3264 if (!qg_list && !rst_src)
3265 return ICE_ERR_PARAM;
3267 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3268 return ICE_ERR_PARAM;
3270 cmd->num_entries = num_qgrps;
3272 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3273 ICE_AQC_Q_DIS_TIMEOUT_M);
3277 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3278 cmd->vmvf_and_timeout |=
3279 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3286 /* flush pipe on time out */
3287 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3288 /* If no queue group info, we are in a reset flow. Issue the AQ */
3292 /* set RD bit to indicate that command buffer is provided by the driver
3293 * and it needs to be read by the firmware
3295 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3297 for (i = 0; i < num_qgrps; ++i) {
3298 /* Calculate the size taken up by the queue IDs in this group */
3299 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
3301 /* Add the size of the group header */
3302 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
3304 /* If the num of queues is even, add 2 bytes of padding */
3305 if ((qg_list[i].num_qs % 2) == 0)
3310 return ICE_ERR_PARAM;
3313 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3316 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3317 vmvf_num, hw->adminq.sq_last_status);
3319 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3320 LE16_TO_CPU(qg_list[0].q_id[0]),
3321 hw->adminq.sq_last_status);
3327 * ice_aq_move_recfg_lan_txq
3328 * @hw: pointer to the hardware structure
3329 * @num_qs: number of queues to move/reconfigure
3330 * @is_move: true if this operation involves node movement
3331 * @is_tc_change: true if this operation involves a TC change
3332 * @subseq_call: true if this operation is a subsequent call
3333 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3334 * @timeout: timeout in units of 100 usec (valid values 0-50)
3335 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3336 * @buf: struct containing src/dest TEID and per-queue info
3337 * @buf_size: size of buffer for indirect command
3338 * @txqs_moved: out param, number of queues successfully moved
3339 * @cd: pointer to command details structure or NULL
3341 * Move / Reconfigure Tx LAN queues (0x0C32)
3344 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3345 bool is_tc_change, bool subseq_call, bool flush_pipe,
3346 u8 timeout, u32 *blocked_cgds,
3347 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3348 u8 *txqs_moved, struct ice_sq_cd *cd)
3350 struct ice_aqc_move_txqs *cmd;
3351 struct ice_aq_desc desc;
3352 enum ice_status status;
3354 cmd = &desc.params.move_txqs;
3355 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3357 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3358 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3359 return ICE_ERR_PARAM;
3361 if (is_tc_change && !flush_pipe && !blocked_cgds)
3362 return ICE_ERR_PARAM;
3364 if (!is_move && !is_tc_change)
3365 return ICE_ERR_PARAM;
3367 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3370 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3373 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3376 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3379 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3381 cmd->num_qs = num_qs;
3382 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3383 ICE_AQC_Q_CMD_TIMEOUT_M);
3385 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3387 if (!status && txqs_moved)
3388 *txqs_moved = cmd->num_qs;
3390 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3391 is_tc_change && !flush_pipe)
3392 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3397 /* End of FW Admin Queue command wrappers */
3400 * ice_write_byte - write a byte to a packed context structure
3401 * @src_ctx: the context structure to read from
3402 * @dest_ctx: the context to be written to
3403 * @ce_info: a description of the struct to be filled
3406 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3408 u8 src_byte, dest_byte, mask;
3412 /* copy from the next struct field */
3413 from = src_ctx + ce_info->offset;
3415 /* prepare the bits and mask */
3416 shift_width = ce_info->lsb % 8;
3417 mask = (u8)(BIT(ce_info->width) - 1);
3422 /* shift to correct alignment */
3423 mask <<= shift_width;
3424 src_byte <<= shift_width;
3426 /* get the current bits from the target bit string */
3427 dest = dest_ctx + (ce_info->lsb / 8);
3429 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3431 dest_byte &= ~mask; /* get the bits not changing */
3432 dest_byte |= src_byte; /* add in the new bits */
3434 /* put it all back */
3435 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3439 * ice_write_word - write a word to a packed context structure
3440 * @src_ctx: the context structure to read from
3441 * @dest_ctx: the context to be written to
3442 * @ce_info: a description of the struct to be filled
3445 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3452 /* copy from the next struct field */
3453 from = src_ctx + ce_info->offset;
3455 /* prepare the bits and mask */
3456 shift_width = ce_info->lsb % 8;
3457 mask = BIT(ce_info->width) - 1;
3459 /* don't swizzle the bits until after the mask because the mask bits
3460 * will be in a different bit position on big endian machines
3462 src_word = *(u16 *)from;
3465 /* shift to correct alignment */
3466 mask <<= shift_width;
3467 src_word <<= shift_width;
3469 /* get the current bits from the target bit string */
3470 dest = dest_ctx + (ce_info->lsb / 8);
3472 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3474 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
3475 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
3477 /* put it all back */
3478 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3482 * ice_write_dword - write a dword to a packed context structure
3483 * @src_ctx: the context structure to read from
3484 * @dest_ctx: the context to be written to
3485 * @ce_info: a description of the struct to be filled
3488 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3490 u32 src_dword, mask;
3495 /* copy from the next struct field */
3496 from = src_ctx + ce_info->offset;
3498 /* prepare the bits and mask */
3499 shift_width = ce_info->lsb % 8;
3501 /* if the field width is exactly 32 on an x86 machine, then the shift
3502 * operation will not work because the SHL instructions count is masked
3503 * to 5 bits so the shift will do nothing
3505 if (ce_info->width < 32)
3506 mask = BIT(ce_info->width) - 1;
3510 /* don't swizzle the bits until after the mask because the mask bits
3511 * will be in a different bit position on big endian machines
3513 src_dword = *(u32 *)from;
3516 /* shift to correct alignment */
3517 mask <<= shift_width;
3518 src_dword <<= shift_width;
3520 /* get the current bits from the target bit string */
3521 dest = dest_ctx + (ce_info->lsb / 8);
3523 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3525 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
3526 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
3528 /* put it all back */
3529 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3533 * ice_write_qword - write a qword to a packed context structure
3534 * @src_ctx: the context structure to read from
3535 * @dest_ctx: the context to be written to
3536 * @ce_info: a description of the struct to be filled
3539 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3541 u64 src_qword, mask;
3546 /* copy from the next struct field */
3547 from = src_ctx + ce_info->offset;
3549 /* prepare the bits and mask */
3550 shift_width = ce_info->lsb % 8;
3552 /* if the field width is exactly 64 on an x86 machine, then the shift
3553 * operation will not work because the SHL instructions count is masked
3554 * to 6 bits so the shift will do nothing
3556 if (ce_info->width < 64)
3557 mask = BIT_ULL(ce_info->width) - 1;
3561 /* don't swizzle the bits until after the mask because the mask bits
3562 * will be in a different bit position on big endian machines
3564 src_qword = *(u64 *)from;
3567 /* shift to correct alignment */
3568 mask <<= shift_width;
3569 src_qword <<= shift_width;
3571 /* get the current bits from the target bit string */
3572 dest = dest_ctx + (ce_info->lsb / 8);
3574 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
3576 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
3577 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
3579 /* put it all back */
3580 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3584 * ice_set_ctx - set context bits in packed structure
3585 * @src_ctx: pointer to a generic non-packed context structure
3586 * @dest_ctx: pointer to memory for the packed structure
3587 * @ce_info: a description of the structure to be transformed
3590 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3594 for (f = 0; ce_info[f].width; f++) {
3595 /* We have to deal with each element of the FW response
3596 * using the correct size so that we are correct regardless
3597 * of the endianness of the machine.
3599 switch (ce_info[f].size_of) {
3601 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3604 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3607 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3610 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3613 return ICE_ERR_INVAL_SIZE;
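/* Illustrative sketch: packing a hypothetical two-field context with
 * ice_set_ctx(). The ice_ctx_ele member names (offset, size_of, width,
 * lsb) are taken from the accessors used above; the example struct,
 * descriptor table, my_ctx, and packed_buf are hypothetical.
 *
 *	struct example_ctx {
 *		u16 head;	// 13 significant bits
 *		u8 ena;		// 1 significant bit
 *	};
 *
 *	static const struct ice_ctx_ele example_ctx_info[] = {
 *		{ .offset = offsetof(struct example_ctx, head),
 *		  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *		{ .offset = offsetof(struct example_ctx, ena),
 *		  .size_of = sizeof(u8), .width = 1, .lsb = 13 },
 *		{ 0 },	// width == 0 terminates the walk in ice_set_ctx()
 *	};
 *
 *	ice_set_ctx((u8 *)&my_ctx, packed_buf, example_ctx_info);
 */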
3621 * ice_read_byte - read context byte into struct
3622 * @src_ctx: the context structure to read from
3623 * @dest_ctx: the context to be written to
3624 * @ce_info: a description of the struct to be filled
3627 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3633 /* prepare the bits and mask */
3634 shift_width = ce_info->lsb % 8;
3635 mask = (u8)(BIT(ce_info->width) - 1);
3637 /* shift to correct alignment */
3638 mask <<= shift_width;
3640 /* get the current bits from the src bit string */
3641 src = src_ctx + (ce_info->lsb / 8);
3643 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3645 dest_byte &= ~(mask);
3647 dest_byte >>= shift_width;
3649 /* get the address from the struct field */
3650 target = dest_ctx + ce_info->offset;
3652 /* put it back in the struct */
3653 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3657 * ice_read_word - read context word into struct
3658 * @src_ctx: the context structure to read from
3659 * @dest_ctx: the context to be written to
3660 * @ce_info: a description of the struct to be filled
3663 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3665 u16 dest_word, mask;
3670 /* prepare the bits and mask */
3671 shift_width = ce_info->lsb % 8;
3672 mask = BIT(ce_info->width) - 1;
3674 /* shift to correct alignment */
3675 mask <<= shift_width;
3677 /* get the current bits from the src bit string */
3678 src = src_ctx + (ce_info->lsb / 8);
3680 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
3682 /* the data in the memory is stored as little endian so mask it correctly */
3685 src_word &= ~(CPU_TO_LE16(mask));
3687 /* get the data back into host order before shifting */
3688 dest_word = LE16_TO_CPU(src_word);
3690 dest_word >>= shift_width;
3692 /* get the address from the struct field */
3693 target = dest_ctx + ce_info->offset;
3695 /* put it back in the struct */
3696 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3700 * ice_read_dword - read context dword into struct
3701 * @src_ctx: the context structure to read from
3702 * @dest_ctx: the context to be written to
3703 * @ce_info: a description of the struct to be filled
3706 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3708 u32 dest_dword, mask;
3713 /* prepare the bits and mask */
3714 shift_width = ce_info->lsb % 8;
3716 /* if the field width is exactly 32 on an x86 machine, then the shift
3717 * operation will not work because the SHL instructions count is masked
3718 * to 5 bits so the shift will do nothing
3720 if (ce_info->width < 32)
3721 mask = BIT(ce_info->width) - 1;
3725 /* shift to correct alignment */
3726 mask <<= shift_width;
3728 /* get the current bits from the src bit string */
3729 src = src_ctx + (ce_info->lsb / 8);
3731 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
3733 /* the data in the memory is stored as little endian so mask it correctly */
3736 src_dword &= ~(CPU_TO_LE32(mask));
3738 /* get the data back into host order before shifting */
3739 dest_dword = LE32_TO_CPU(src_dword);
3741 dest_dword >>= shift_width;
3743 /* get the address from the struct field */
3744 target = dest_ctx + ce_info->offset;
3746 /* put it back in the struct */
3747 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3751 * ice_read_qword - read context qword into struct
3752 * @src_ctx: the context structure to read from
3753 * @dest_ctx: the context to be written to
3754 * @ce_info: a description of the struct to be filled
3757 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3759 u64 dest_qword, mask;
3764 /* prepare the bits and mask */
3765 shift_width = ce_info->lsb % 8;
3767 /* if the field width is exactly 64 on an x86 machine, then the shift
3768 * operation will not work because the SHL instructions count is masked
3769 * to 6 bits so the shift will do nothing
3771 if (ce_info->width < 64)
3772 mask = BIT_ULL(ce_info->width) - 1;
3776 /* shift to correct alignment */
3777 mask <<= shift_width;
3779 /* get the current bits from the src bit string */
3780 src = src_ctx + (ce_info->lsb / 8);
3782 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
3784 /* the data in the memory is stored as little endian so mask it correctly */
3787 src_qword &= ~(CPU_TO_LE64(mask));
3789 /* get the data back into host order before shifting */
3790 dest_qword = LE64_TO_CPU(src_qword);
3792 dest_qword >>= shift_width;
3794 /* get the address from the struct field */
3795 target = dest_ctx + ce_info->offset;
3797 /* put it back in the struct */
3798 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3802 * ice_get_ctx - extract context bits from a packed structure
3803 * @src_ctx: pointer to a generic packed context structure
3804 * @dest_ctx: pointer to a generic non-packed context structure
3805 * @ce_info: a description of the structure to be read from
3808 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3812 for (f = 0; ce_info[f].width; f++) {
3813 switch (ce_info[f].size_of) {
3815 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
3818 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
3821 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
3824 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
3827 /* nothing to do, just keep going */
3836 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3837 * @hw: pointer to the HW struct
3838 * @vsi_handle: software VSI handle
3840 * @q_handle: software queue handle
3843 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3845 struct ice_vsi_ctx *vsi;
3846 struct ice_q_ctx *q_ctx;
3848 vsi = ice_get_vsi_ctx(hw, vsi_handle);
3851 if (q_handle >= vsi->num_lan_q_entries[tc])
3853 if (!vsi->lan_q_ctx[tc])
3855 q_ctx = vsi->lan_q_ctx[tc];
3856 return &q_ctx[q_handle];
3861 * @pi: port information structure
3862 * @vsi_handle: software VSI handle
3864 * @q_handle: software queue handle
3865 * @num_qgrps: Number of added queue groups
3866 * @buf: list of queue groups to be added
3867 * @buf_size: size of buffer for indirect command
3868 * @cd: pointer to command details structure or NULL
3870 * This function adds one LAN queue
3873 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3874 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3875 struct ice_sq_cd *cd)
3877 struct ice_aqc_txsched_elem_data node = { 0 };
3878 struct ice_sched_node *parent;
3879 struct ice_q_ctx *q_ctx;
3880 enum ice_status status;
3883 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3886 if (num_qgrps > 1 || buf->num_txqs > 1)
3887 return ICE_ERR_MAX_LIMIT;
3891 if (!ice_is_vsi_valid(hw, vsi_handle))
3892 return ICE_ERR_PARAM;
3894 ice_acquire_lock(&pi->sched_lock);
3896 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3898 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3900 status = ICE_ERR_PARAM;
3904 /* find a parent node */
3905 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3906 ICE_SCHED_NODE_OWNER_LAN);
3908 status = ICE_ERR_PARAM;
3912 buf->parent_teid = parent->info.node_teid;
3913 node.parent_teid = parent->info.node_teid;
3914 /* Mark the values in the "generic" section as valid. The default
3915 * value in the "generic" section is zero. This means that :
3916 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3917 * - 0 priority among siblings, indicated by Bit 1-3.
3918 * - WFQ, indicated by Bit 4.
3919 * - 0 Adjustment value is used in PSM credit update flow, indicated by
3920 * Bits 5-6.
3921 * - Bit 7 is reserved.
3922 * Without setting the generic section as valid in valid_sections, the
3923 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3925 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3927 /* add the LAN queue */
3928 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3929 if (status != ICE_SUCCESS) {
3930 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3931 LE16_TO_CPU(buf->txqs[0].txq_id),
3932 hw->adminq.sq_last_status);
3936 node.node_teid = buf->txqs[0].q_teid;
3937 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3938 q_ctx->q_handle = q_handle;
3939 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
3941 /* add a leaf node into scheduler tree queue layer */
3942 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3944 status = ice_sched_replay_q_bw(pi, q_ctx);
3947 ice_release_lock(&pi->sched_lock);
3953 * @pi: port information structure
3954 * @vsi_handle: software VSI handle
3956 * @num_queues: number of queues
3957 * @q_handles: pointer to software queue handle array
3958 * @q_ids: pointer to the q_id array
3959 * @q_teids: pointer to queue node teids
3960 * @rst_src: if called due to reset, specifies the reset source
3961 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3962 * @cd: pointer to command details structure or NULL
3964 * This function removes queues and their corresponding nodes in SW DB
3967 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3968 u16 *q_handles, u16 *q_ids, u32 *q_teids,
3969 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3970 struct ice_sq_cd *cd)
3972 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3973 struct ice_aqc_dis_txq_item qg_list;
3974 struct ice_q_ctx *q_ctx;
3977 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3981 /* If the queue is already disabled but the disable queue command
3982 * still has to be sent to complete the VF reset, then call
3983 * ice_aq_dis_lan_txq without any queue information
3986 return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
3991 ice_acquire_lock(&pi->sched_lock);
3993 for (i = 0; i < num_queues; i++) {
3994 struct ice_sched_node *node;
3996 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
3999 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
4001 ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
4005 if (q_ctx->q_handle != q_handles[i]) {
4006 ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4007 q_ctx->q_handle, q_handles[i]);
4010 qg_list.parent_teid = node->info.parent_teid;
4012 qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
4013 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
4014 sizeof(qg_list), rst_src, vmvf_num,
4017 if (status != ICE_SUCCESS)
4019 ice_free_sched_node(pi, node);
4020 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4022 ice_release_lock(&pi->sched_lock);
/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = ICE_SUCCESS;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}
/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}
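
/*
 * Illustrative usage sketch (not part of the driver): configuring LAN queues
 * for a VSI that only uses TC 0. The max_lanqs array is indexed by TC, and
 * entries for TCs that are not enabled in tc_bitmap are ignored. The queue
 * count below is hypothetical.
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *
 *	max_lanqs[0] = 16;	(16 Tx queues on TC 0)
 *	status = ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
 */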
/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* In start of replay, move entries into replay_rules list, it
	 * will allow adding rules entries back to filt_rules list,
	 * which is operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return ice_sched_replay_tc_node_bw(hw->port_info);
}
/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}
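
/*
 * Illustrative usage sketch (not part of the driver): after a reset the
 * caller is expected to replay the main VSI first (which runs
 * ice_replay_pre_init()) and only then replay the remaining VSIs, finishing
 * with ice_replay_post(). The loop bounds and the vsi_present() helper are
 * hypothetical.
 *
 *	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	if (status)
 *		return status;
 *
 *	for (handle = 0; handle < ICE_MAX_VSI; handle++) {
 *		if (handle == ICE_MAIN_VSI_HANDLE || !vsi_present(handle))
 *			continue;
 *		status = ice_replay_vsi(hw, handle);
 *		if (status)
 *			break;
 *	}
 *	ice_replay_post(hw);
 */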
/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}
/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
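
/*
 * Worked example of the 40-bit roll-over handling above (values are
 * hypothetical): if the previous read was 0xFF_FFFF_FFF0 and the counter has
 * since wrapped around to 0x10, then new_data < *prev_stat, so the increment
 * is (0x10 + BIT_ULL(40)) - 0xFF_FFFF_FFF0 = 0x20, i.e. exactly the 32
 * packets counted across the wrap. Without the BIT_ULL(40) correction the
 * unsigned subtraction would add a huge bogus delta to the software stat.
 */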
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
/**
 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
 * @hw: ptr to the hardware info
 * @vsi_handle: VSI handle
 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
 * @cur_stats: ptr to current stats structure
 *
 * The GLV_REPC statistic register actually tracks two 16bit statistics, and
 * thus cannot be read using the normal ice_stat_update32 function.
 *
 * Read the GLV_REPC register associated with the given VSI, and update the
 * rx_no_desc and rx_error values in the ice_eth_stats structure.
 *
 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
 * cleared each time it's read.
 *
 * Note that the GLV_RDPC register also counts the causes that would trigger
 * GLV_REPC. However, it does not give the finer grained detail about why the
 * packets are being dropped. The GLV_REPC values can be used to distinguish
 * whether Rx packets are dropped due to errors or due to no available
 * descriptors.
 */
void
ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
		     struct ice_eth_stats *cur_stats)
{
	u16 vsi_num, no_desc, error_cnt;
	u32 repc;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return;

	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	/* If we haven't loaded stats yet, just clear the current value */
	if (!prev_stat_loaded) {
		wr32(hw, GLV_REPC(vsi_num), 0);
		return;
	}

	repc = rd32(hw, GLV_REPC(vsi_num));
	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;

	/* Clear the count by writing to the stats register */
	wr32(hw, GLV_REPC(vsi_num), 0);

	cur_stats->rx_no_desc += no_desc;
	cur_stats->rx_errors += error_cnt;
}
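
/*
 * Worked example of the GLV_REPC decode above (register value is
 * hypothetical): a raw read of 0x0003_0005 splits into two 16-bit counters,
 * one from the upper half and one from the lower half, so the call adds 3 to
 * one statistic and 5 to the other. Which half holds the error count versus
 * the no-descriptor count is determined by GLV_REPC_ERROR_CNT_S/_M and
 * GLV_REPC_NO_DESC_CNT_S/_M; the split shown here assumes the no-descriptor
 * count occupies bits 15:0 and the error count bits 31:16.
 */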
/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
	buf->generic[0].node_teid = CPU_TO_LE32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status != ICE_SUCCESS || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}
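
/*
 * Illustrative usage sketch (not part of the driver): querying the scheduler
 * element for a TEID saved earlier, for example the q_teid stored in the
 * queue context by ice_ena_vsi_txq(). The variable names are hypothetical,
 * and the field access assumes the ice_aqc_get_elem/ice_aqc_txsched_elem_data
 * layout used elsewhere in this code base.
 *
 *	struct ice_aqc_get_elem elem;
 *	enum ice_status status;
 *
 *	status = ice_sched_query_elem(hw, q_ctx->q_teid, &elem);
 *	if (!status)
 *		parent_teid = LE32_TO_CPU(elem.generic[0].parent_teid);
 */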
/**
 * ice_get_fw_mode - returns FW mode
 * @hw: pointer to the HW struct
 */
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
{
#define ICE_FW_MODE_DBG_M BIT(0)
#define ICE_FW_MODE_REC_M BIT(1)
#define ICE_FW_MODE_ROLLBACK_M BIT(2)
	u32 fw_mode;

	/* check the current FW mode */
	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;

	if (fw_mode & ICE_FW_MODE_DBG_M)
		return ICE_FW_MODE_DBG;
	else if (fw_mode & ICE_FW_MODE_REC_M)
		return ICE_FW_MODE_REC;
	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
		return ICE_FW_MODE_ROLLBACK;
	else
		return ICE_FW_MODE_NORMAL;
}
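
/*
 * Worked example of the mode decode above (register value is hypothetical):
 * assuming the FW_MODES field occupies bits 2:0, as the BIT() checks above
 * imply, a masked value of 0x4 has only the rollback bit set and the function
 * returns ICE_FW_MODE_ROLLBACK; a masked value of 0x0 falls through to
 * ICE_FW_MODE_NORMAL.
 */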
/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}
	return false;
}
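
/*
 * Worked example of the version check above (the baseline constants shown are
 * hypothetical): with ICE_FW_API_LINK_OVERRIDE_{MAJ,MIN,PATCH} equal to
 * 1.5.2, firmware reporting API 1.5.2, 1.6.0 or 2.0.0 supports link override,
 * while 1.5.1 and 1.4.9 do not. The comparison is a standard
 * major/minor/patch ordering: a newer major always wins, an equal major falls
 * back to the minor, and an equal minor falls back to the patch level.
 */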
/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));