1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_common.h"
7 #include "ice_adminq_cmd.h"
10 #include "ice_switch.h"
12 #define ICE_PF_RESET_WAIT_COUNT 300
15 * ice_set_mac_type - Sets MAC type
16 * @hw: pointer to the HW structure
18 * This function sets the MAC type of the adapter based on the
19 * vendor ID and device ID stored in the HW structure.
21 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
23 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
25 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
26 return ICE_ERR_DEVICE_NOT_SUPPORTED;
28 switch (hw->device_id) {
29 case ICE_DEV_ID_E810C_BACKPLANE:
30 case ICE_DEV_ID_E810C_QSFP:
31 case ICE_DEV_ID_E810C_SFP:
32 case ICE_DEV_ID_E810_XXV_BACKPLANE:
33 case ICE_DEV_ID_E810_XXV_QSFP:
34 case ICE_DEV_ID_E810_XXV_SFP:
35 hw->mac_type = ICE_MAC_E810;
37 case ICE_DEV_ID_E822C_10G_BASE_T:
38 case ICE_DEV_ID_E822C_BACKPLANE:
39 case ICE_DEV_ID_E822C_QSFP:
40 case ICE_DEV_ID_E822C_SFP:
41 case ICE_DEV_ID_E822C_SGMII:
42 case ICE_DEV_ID_E822L_10G_BASE_T:
43 case ICE_DEV_ID_E822L_BACKPLANE:
44 case ICE_DEV_ID_E822L_SFP:
45 case ICE_DEV_ID_E822L_SGMII:
46 hw->mac_type = ICE_MAC_GENERIC;
49 hw->mac_type = ICE_MAC_UNKNOWN;
53 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
58 * ice_clear_pf_cfg - Clear PF configuration
59 * @hw: pointer to the hardware structure
61 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
62 * configuration, flow director filters, etc.).
64 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
66 struct ice_aq_desc desc;
68 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
70 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
74 * ice_aq_manage_mac_read - manage MAC address read command
75 * @hw: pointer to the HW struct
76 * @buf: a virtual buffer to hold the manage MAC read response
77 * @buf_size: Size of the virtual buffer
78 * @cd: pointer to command details structure or NULL
80 * This function is used to return the per-PF station MAC address (0x0107).
81 * NOTE: Upon successful completion of this command, MAC address information
82 * is returned in the user-specified buffer, which should be interpreted as a
83 * "manage_mac_read" response.
84 * Responses such as the various MAC addresses are stored in the HW struct (port.mac).
85 * ice_aq_discover_caps is expected to be called before this function is called.
87 static enum ice_status
88 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
91 struct ice_aqc_manage_mac_read_resp *resp;
92 struct ice_aqc_manage_mac_read *cmd;
93 struct ice_aq_desc desc;
94 enum ice_status status;
98 cmd = &desc.params.mac_read;
100 if (buf_size < sizeof(*resp))
101 return ICE_ERR_BUF_TOO_SHORT;
103 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
105 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
109 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
110 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
112 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
113 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
117 /* A single port can report up to two (LAN and WoL) addresses */
118 for (i = 0; i < cmd->num_addr; i++)
119 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
120 ice_memcpy(hw->port_info->mac.lan_addr,
121 resp[i].mac_addr, ETH_ALEN,
123 ice_memcpy(hw->port_info->mac.perm_addr,
125 ETH_ALEN, ICE_DMA_TO_NONDMA);
132 * ice_aq_get_phy_caps - returns PHY capabilities
133 * @pi: port information structure
134 * @qual_mods: report qualified modules
135 * @report_mode: report mode capabilities
136 * @pcaps: structure for PHY capabilities to be filled
137 * @cd: pointer to command details structure or NULL
139 * Returns the various PHY capabilities supported on the Port (0x0600)
142 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
143 struct ice_aqc_get_phy_caps_data *pcaps,
144 struct ice_sq_cd *cd)
146 struct ice_aqc_get_phy_caps *cmd;
147 u16 pcaps_size = sizeof(*pcaps);
148 struct ice_aq_desc desc;
149 enum ice_status status;
151 cmd = &desc.params.get_phy;
153 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
154 return ICE_ERR_PARAM;
156 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
159 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
161 cmd->param0 |= CPU_TO_LE16(report_mode);
162 status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
164 if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
165 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
166 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
173 * ice_aq_get_link_topo_handle - get link topology node return status
174 * @pi: port information structure
175 * @node_type: requested node type
176 * @cd: pointer to command details structure or NULL
178 * Get link topology node return status for specified node type (0x06E0)
180 * Node type cage can be used to determine if a cage is present. If the AQC
181 * returns an error (ENOENT), then no cage is present. If no cage is present,
182 * then the connection type is backplane or BASE-T.
184 static enum ice_status
185 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
186 struct ice_sq_cd *cd)
188 struct ice_aqc_get_link_topo *cmd;
189 struct ice_aq_desc desc;
191 cmd = &desc.params.get_link_topo;
193 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
195 cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
196 ICE_AQC_LINK_TOPO_NODE_CTX_S);
199 cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
201 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
205 * ice_is_media_cage_present
206 * @pi: port information structure
208 * Returns true if media cage is present, else false. If no cage, then
209 * media type is backplane or BASE-T.
211 static bool ice_is_media_cage_present(struct ice_port_info *pi)
213 /* Node type cage can be used to determine if a cage is present. If the AQC
214 * returns an error (ENOENT), then no cage is present. If no cage is present,
215 * then the connection type is backplane or BASE-T.
217 return !ice_aq_get_link_topo_handle(pi,
218 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
223 * ice_get_media_type - Gets media type
224 * @pi: port information structure
226 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
228 struct ice_link_status *hw_link_info;
231 return ICE_MEDIA_UNKNOWN;
233 hw_link_info = &pi->phy.link_info;
234 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
235 /* If more than one media type is selected, report unknown */
236 return ICE_MEDIA_UNKNOWN;
238 if (hw_link_info->phy_type_low) {
239 switch (hw_link_info->phy_type_low) {
240 case ICE_PHY_TYPE_LOW_1000BASE_SX:
241 case ICE_PHY_TYPE_LOW_1000BASE_LX:
242 case ICE_PHY_TYPE_LOW_10GBASE_SR:
243 case ICE_PHY_TYPE_LOW_10GBASE_LR:
244 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
245 case ICE_PHY_TYPE_LOW_25GBASE_SR:
246 case ICE_PHY_TYPE_LOW_25GBASE_LR:
247 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
248 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
249 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
250 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
251 case ICE_PHY_TYPE_LOW_50GBASE_SR:
252 case ICE_PHY_TYPE_LOW_50GBASE_FR:
253 case ICE_PHY_TYPE_LOW_50GBASE_LR:
254 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
255 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
256 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
257 case ICE_PHY_TYPE_LOW_100GBASE_DR:
258 return ICE_MEDIA_FIBER;
259 case ICE_PHY_TYPE_LOW_100BASE_TX:
260 case ICE_PHY_TYPE_LOW_1000BASE_T:
261 case ICE_PHY_TYPE_LOW_2500BASE_T:
262 case ICE_PHY_TYPE_LOW_5GBASE_T:
263 case ICE_PHY_TYPE_LOW_10GBASE_T:
264 case ICE_PHY_TYPE_LOW_25GBASE_T:
265 return ICE_MEDIA_BASET;
266 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
267 case ICE_PHY_TYPE_LOW_25GBASE_CR:
268 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
269 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
270 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
271 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
272 case ICE_PHY_TYPE_LOW_50GBASE_CP:
273 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
274 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
275 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
277 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
278 case ICE_PHY_TYPE_LOW_40G_XLAUI:
279 case ICE_PHY_TYPE_LOW_50G_LAUI2:
280 case ICE_PHY_TYPE_LOW_50G_AUI2:
281 case ICE_PHY_TYPE_LOW_50G_AUI1:
282 case ICE_PHY_TYPE_LOW_100G_AUI4:
283 case ICE_PHY_TYPE_LOW_100G_CAUI4:
284 if (ice_is_media_cage_present(pi))
287 case ICE_PHY_TYPE_LOW_1000BASE_KX:
288 case ICE_PHY_TYPE_LOW_2500BASE_KX:
289 case ICE_PHY_TYPE_LOW_2500BASE_X:
290 case ICE_PHY_TYPE_LOW_5GBASE_KR:
291 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
292 case ICE_PHY_TYPE_LOW_25GBASE_KR:
293 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
294 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
295 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
296 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
297 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
298 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
299 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
300 return ICE_MEDIA_BACKPLANE;
303 switch (hw_link_info->phy_type_high) {
304 case ICE_PHY_TYPE_HIGH_100G_AUI2:
305 if (ice_is_media_cage_present(pi))
308 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
309 return ICE_MEDIA_BACKPLANE;
312 return ICE_MEDIA_UNKNOWN;
316 * ice_aq_get_link_info
317 * @pi: port information structure
318 * @ena_lse: enable/disable LinkStatusEvent reporting
319 * @link: pointer to link status structure - optional
320 * @cd: pointer to command details structure or NULL
322 * Get Link Status (0x607). Returns the link status of the adapter.
325 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
326 struct ice_link_status *link, struct ice_sq_cd *cd)
328 struct ice_aqc_get_link_status_data link_data = { 0 };
329 struct ice_aqc_get_link_status *resp;
330 struct ice_link_status *li_old, *li;
331 enum ice_media_type *hw_media_type;
332 struct ice_fc_info *hw_fc_info;
333 bool tx_pause, rx_pause;
334 struct ice_aq_desc desc;
335 enum ice_status status;
340 return ICE_ERR_PARAM;
342 li_old = &pi->phy.link_info_old;
343 hw_media_type = &pi->phy.media_type;
344 li = &pi->phy.link_info;
345 hw_fc_info = &pi->fc;
347 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
348 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
349 resp = &desc.params.get_link_status;
350 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
351 resp->lport_num = pi->lport;
353 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
355 if (status != ICE_SUCCESS)
358 /* save off old link status information */
361 /* update current link status information */
362 li->link_speed = LE16_TO_CPU(link_data.link_speed);
363 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
364 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
365 *hw_media_type = ice_get_media_type(pi);
366 li->link_info = link_data.link_info;
367 li->an_info = link_data.an_info;
368 li->ext_info = link_data.ext_info;
369 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
370 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
371 li->topo_media_conflict = link_data.topo_media_conflict;
372 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
373 ICE_AQ_CFG_PACING_TYPE_M);
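/* Derive the negotiated flow control mode from the Tx/Rx pause bits
 * reported in the autonegotiation info below.
 */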
376 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
377 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
378 if (tx_pause && rx_pause)
379 hw_fc_info->current_mode = ICE_FC_FULL;
381 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
383 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
385 hw_fc_info->current_mode = ICE_FC_NONE;
387 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
389 ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
390 ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
391 (unsigned long long)li->phy_type_low);
392 ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
393 (unsigned long long)li->phy_type_high);
394 ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
395 ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
396 ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
397 ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
398 ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
399 ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
400 ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
402 /* save link status information */
406 /* flag cleared so calling functions don't call AQ again */
407 pi->phy.get_link_info = false;
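/* Illustrative refresh pattern for callers (sketch only, not driver code):
 * the get_link_info flag indicates that the cached link status is stale and
 * should be re-read from firmware before use.
 *
 *    if (pi->phy.get_link_info) {
 *        status = ice_aq_get_link_info(pi, false, NULL, NULL);
 *        if (status)
 *            return status;
 *    }
 */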
414 * @hw: pointer to the HW struct
415 * @max_frame_size: Maximum Frame Size to be supported
416 * @cd: pointer to command details structure or NULL
418 * Set MAC configuration (0x0603)
421 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
423 u16 fc_threshold_val, tx_timer_val;
424 struct ice_aqc_set_mac_cfg *cmd;
425 struct ice_aq_desc desc;
428 cmd = &desc.params.set_mac_cfg;
430 if (max_frame_size == 0)
431 return ICE_ERR_PARAM;
433 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
435 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
437 /* We read back the transmit timer and fc threshold value of
438 * LFC. Thus, we will use index =
439 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
441 * Also, because we are operating on the transmit timer and fc
442 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
444 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
446 /* Retrieve the transmit timer */
448 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
449 tx_timer_val = reg_val &
450 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
451 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
453 /* Retrieve the fc threshold */
455 PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
456 fc_threshold_val = reg_val & MAKEMASK(0xFFFF, 0);
457 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_threshold_val);
459 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
463 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
464 * @hw: pointer to the HW struct
466 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
468 struct ice_switch_info *sw;
470 hw->switch_info = (struct ice_switch_info *)
471 ice_malloc(hw, sizeof(*hw->switch_info));
472 sw = hw->switch_info;
475 return ICE_ERR_NO_MEMORY;
477 INIT_LIST_HEAD(&sw->vsi_list_map_head);
479 return ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
483 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
484 * @hw: pointer to the HW struct
486 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
488 struct ice_switch_info *sw = hw->switch_info;
489 struct ice_vsi_list_map_info *v_pos_map;
490 struct ice_vsi_list_map_info *v_tmp_map;
491 struct ice_sw_recipe *recps;
494 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
495 ice_vsi_list_map_info, list_entry) {
496 LIST_DEL(&v_pos_map->list_entry);
497 ice_free(hw, v_pos_map);
499 recps = hw->switch_info->recp_list;
500 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
501 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
503 recps[i].root_rid = i;
504 LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
505 &recps[i].rg_list, ice_recp_grp_entry,
507 LIST_DEL(&rg_entry->l_entry);
508 ice_free(hw, rg_entry);
511 if (recps[i].adv_rule) {
512 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
513 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
515 ice_destroy_lock(&recps[i].filt_rule_lock);
516 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
517 &recps[i].filt_rules,
518 ice_adv_fltr_mgmt_list_entry,
520 LIST_DEL(&lst_itr->list_entry);
521 ice_free(hw, lst_itr->lkups);
522 ice_free(hw, lst_itr);
525 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
527 ice_destroy_lock(&recps[i].filt_rule_lock);
528 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
529 &recps[i].filt_rules,
530 ice_fltr_mgmt_list_entry,
532 LIST_DEL(&lst_itr->list_entry);
533 ice_free(hw, lst_itr);
536 if (recps[i].root_buf)
537 ice_free(hw, recps[i].root_buf);
539 ice_rm_all_sw_replay_rule_info(hw);
540 ice_free(hw, sw->recp_list);
545 * ice_get_itr_intrl_gran
546 * @hw: pointer to the HW struct
548 * Determines the ITR/INTRL granularities based on the maximum aggregate
549 * bandwidth according to the device's configuration during power-on.
551 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
553 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
554 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
555 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
557 switch (max_agg_bw) {
558 case ICE_MAX_AGG_BW_200G:
559 case ICE_MAX_AGG_BW_100G:
560 case ICE_MAX_AGG_BW_50G:
561 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
562 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
564 case ICE_MAX_AGG_BW_25G:
565 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
566 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
572 * ice_get_nvm_version - get cached NVM version data
573 * @hw: pointer to the hardware structure
574 * @oem_ver: 8 bit NVM version
575 * @oem_build: 16 bit NVM build number
576 * @oem_patch: 8 bit NVM patch number
577 * @ver_hi: high part of the 16 bit NVM version
578 * @ver_lo: low part of the 16 bit NVM version
581 ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
582 u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
584 struct ice_nvm_info *nvm = &hw->nvm;
586 *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
587 *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
588 *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
589 ICE_OEM_VER_BUILD_SHIFT);
590 *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
591 *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
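/* Worked example (assuming the usual mask layout of OEM version in the top
 * byte, build in the middle 16 bits and patch in the low byte of oem_ver):
 * an oem_ver word of 0x0A0B0C0D decodes to oem_ver = 0x0A,
 * oem_build = 0x0B0C and oem_patch = 0x0D.
 */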
595 * ice_print_rollback_msg - print FW rollback message
596 * @hw: pointer to the hardware structure
598 void ice_print_rollback_msg(struct ice_hw *hw)
600 char nvm_str[ICE_NVM_VER_LEN] = { 0 };
601 u8 oem_ver, oem_patch, ver_hi, ver_lo;
604 ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
606 SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d", ver_hi,
607 ver_lo, hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
609 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
610 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
614 * ice_init_hw - main hardware initialization routine
615 * @hw: pointer to the hardware structure
617 enum ice_status ice_init_hw(struct ice_hw *hw)
619 struct ice_aqc_get_phy_caps_data *pcaps;
620 enum ice_status status;
624 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
626 /* Set MAC type based on DeviceID */
627 status = ice_set_mac_type(hw);
631 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
632 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
633 PF_FUNC_RID_FUNCTION_NUMBER_S;
635 status = ice_reset(hw, ICE_RESET_PFR);
639 ice_get_itr_intrl_gran(hw);
641 status = ice_create_all_ctrlq(hw);
643 goto err_unroll_cqinit;
645 status = ice_init_nvm(hw);
647 goto err_unroll_cqinit;
649 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
650 ice_print_rollback_msg(hw);
652 status = ice_clear_pf_cfg(hw);
654 goto err_unroll_cqinit;
656 /* Set bit to enable Flow Director filters */
657 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
658 INIT_LIST_HEAD(&hw->fdir_list_head);
660 ice_clear_pxe_mode(hw);
662 status = ice_get_caps(hw);
664 goto err_unroll_cqinit;
666 hw->port_info = (struct ice_port_info *)
667 ice_malloc(hw, sizeof(*hw->port_info));
668 if (!hw->port_info) {
669 status = ICE_ERR_NO_MEMORY;
670 goto err_unroll_cqinit;
673 /* set the back pointer to HW */
674 hw->port_info->hw = hw;
676 /* Initialize port_info struct with switch configuration data */
677 status = ice_get_initial_sw_cfg(hw);
679 goto err_unroll_alloc;
682 /* Query the allocated resources for Tx scheduler */
683 status = ice_sched_query_res_alloc(hw);
685 ice_debug(hw, ICE_DBG_SCHED,
686 "Failed to get scheduler allocated resources\n");
687 goto err_unroll_alloc;
689 ice_sched_get_psm_clk_freq(hw);
691 /* Initialize port_info struct with scheduler data */
692 status = ice_sched_init_port(hw->port_info);
694 goto err_unroll_sched;
696 pcaps = (struct ice_aqc_get_phy_caps_data *)
697 ice_malloc(hw, sizeof(*pcaps));
699 status = ICE_ERR_NO_MEMORY;
700 goto err_unroll_sched;
703 /* Initialize port_info struct with PHY capabilities */
704 status = ice_aq_get_phy_caps(hw->port_info, false,
705 ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
708 goto err_unroll_sched;
710 /* Initialize port_info struct with link information */
711 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
713 goto err_unroll_sched;
714 /* need a valid SW entry point to build a Tx tree */
715 if (!hw->sw_entry_point_layer) {
716 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
717 status = ICE_ERR_CFG;
718 goto err_unroll_sched;
720 INIT_LIST_HEAD(&hw->agg_list);
721 /* Initialize max burst size */
722 if (!hw->max_burst_size)
723 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
725 status = ice_init_fltr_mgmt_struct(hw);
727 goto err_unroll_sched;
729 /* Get MAC information */
730 /* A single port can report up to two (LAN and WoL) addresses */
731 mac_buf = ice_calloc(hw, 2,
732 sizeof(struct ice_aqc_manage_mac_read_resp));
733 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
736 status = ICE_ERR_NO_MEMORY;
737 goto err_unroll_fltr_mgmt_struct;
740 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
741 ice_free(hw, mac_buf);
744 goto err_unroll_fltr_mgmt_struct;
745 /* Obtain counter base index which would be used by flow director */
746 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
748 goto err_unroll_fltr_mgmt_struct;
749 status = ice_init_hw_tbls(hw);
751 goto err_unroll_fltr_mgmt_struct;
754 err_unroll_fltr_mgmt_struct:
755 ice_cleanup_fltr_mgmt_struct(hw);
757 ice_sched_cleanup_all(hw);
759 ice_free(hw, hw->port_info);
760 hw->port_info = NULL;
762 ice_destroy_all_ctrlq(hw);
767 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
768 * @hw: pointer to the hardware structure
770 * This should be called only during nominal operation, not as a result of
771 * ice_init_hw() failing, since ice_init_hw() already takes care of unrolling
772 * the applicable initializations if it fails for any reason.
774 void ice_deinit_hw(struct ice_hw *hw)
776 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
777 ice_cleanup_fltr_mgmt_struct(hw);
779 ice_sched_cleanup_all(hw);
780 ice_sched_clear_agg(hw);
782 ice_free_hw_tbls(hw);
785 ice_free(hw, hw->port_info);
786 hw->port_info = NULL;
789 ice_destroy_all_ctrlq(hw);
791 /* Clear VSI contexts if not already cleared */
792 ice_clear_all_vsi_ctx(hw);
796 * ice_check_reset - Check to see if a global reset is complete
797 * @hw: pointer to the hardware structure
799 enum ice_status ice_check_reset(struct ice_hw *hw)
801 u32 cnt, reg = 0, grst_delay, uld_mask;
803 /* Poll for Device Active state in case a recent CORER, GLOBR,
804 * or EMPR has occurred. The grst delay value is in 100ms units.
805 * Add 1sec for outstanding AQ commands that can take a long time.
807 grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
808 GLGEN_RSTCTL_GRSTDEL_S) + 10;
810 for (cnt = 0; cnt < grst_delay; cnt++) {
811 ice_msec_delay(100, true);
812 reg = rd32(hw, GLGEN_RSTAT);
813 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
817 if (cnt == grst_delay) {
818 ice_debug(hw, ICE_DBG_INIT,
819 "Global reset polling failed to complete.\n");
820 return ICE_ERR_RESET_FAILED;
823 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
824 GLNVM_ULD_PCIER_DONE_1_M |\
825 GLNVM_ULD_CORER_DONE_M |\
826 GLNVM_ULD_GLOBR_DONE_M |\
827 GLNVM_ULD_POR_DONE_M |\
828 GLNVM_ULD_POR_DONE_1_M |\
829 GLNVM_ULD_PCIER_DONE_2_M)
831 uld_mask = ICE_RESET_DONE_MASK;
833 /* Device is Active; check Global Reset processes are done */
834 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
835 reg = rd32(hw, GLNVM_ULD) & uld_mask;
836 if (reg == uld_mask) {
837 ice_debug(hw, ICE_DBG_INIT,
838 "Global reset processes done. %d\n", cnt);
841 ice_msec_delay(10, true);
844 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
845 ice_debug(hw, ICE_DBG_INIT,
846 "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
848 return ICE_ERR_RESET_FAILED;
855 * ice_pf_reset - Reset the PF
856 * @hw: pointer to the hardware structure
858 * If a global reset has been triggered, this function checks
859 * for its completion and then issues the PF reset.
861 static enum ice_status ice_pf_reset(struct ice_hw *hw)
865 /* If at function entry a global reset was already in progress, i.e.
866 * state is not 'device active' or any of the reset done bits are not
867 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
868 * global reset is done.
870 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
871 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
872 /* poll on global reset currently in progress until done */
873 if (ice_check_reset(hw))
874 return ICE_ERR_RESET_FAILED;
880 reg = rd32(hw, PFGEN_CTRL);
882 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
884 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
885 reg = rd32(hw, PFGEN_CTRL);
886 if (!(reg & PFGEN_CTRL_PFSWR_M))
889 ice_msec_delay(1, true);
892 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
893 ice_debug(hw, ICE_DBG_INIT,
894 "PF reset polling failed to complete.\n");
895 return ICE_ERR_RESET_FAILED;
902 * ice_reset - Perform different types of reset
903 * @hw: pointer to the hardware structure
904 * @req: reset request
906 * This function triggers a reset as specified by the req parameter.
909 * If anything other than a PF reset is triggered, PXE mode is restored.
910 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
911 * interface has been restored in the rebuild flow.
913 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
919 return ice_pf_reset(hw);
920 case ICE_RESET_CORER:
921 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
922 val = GLGEN_RTRIG_CORER_M;
924 case ICE_RESET_GLOBR:
925 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
926 val = GLGEN_RTRIG_GLOBR_M;
929 return ICE_ERR_PARAM;
932 val |= rd32(hw, GLGEN_RTRIG);
933 wr32(hw, GLGEN_RTRIG, val);
936 /* wait for the FW to be ready */
937 return ice_check_reset(hw);
941 * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
942 * @hw: pointer to hardware structure
943 * @module_tlv: pointer to module TLV to return
944 * @module_tlv_len: pointer to module TLV length to return
945 * @module_type: module type requested
947 * Finds the requested sub module TLV type from the Preserved Field
948 * Area (PFA) and returns the TLV pointer and length. The caller can
949 * use these to read the variable length TLV value.
952 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
955 enum ice_status status;
956 u16 pfa_len, pfa_ptr;
959 status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
960 if (status != ICE_SUCCESS) {
961 ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n");
964 status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
965 if (status != ICE_SUCCESS) {
966 ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
969 /* Starting with first TLV after PFA length, iterate through the list
970 * of TLVs to find the requested one.
972 next_tlv = pfa_ptr + 1;
973 while (next_tlv < pfa_ptr + pfa_len) {
974 u16 tlv_sub_module_type;
978 status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
979 if (status != ICE_SUCCESS) {
980 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
983 /* Read TLV length */
984 status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
985 if (status != ICE_SUCCESS) {
986 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
989 if (tlv_sub_module_type == module_type) {
991 *module_tlv = next_tlv;
992 *module_tlv_len = tlv_len;
995 return ICE_ERR_INVAL_SIZE;
997 /* Check next TLV, i.e. current TLV pointer + length + 2 words
998 * (for current TLV's type and length)
1000 next_tlv = next_tlv + tlv_len + 2;
1002 /* Module does not exist */
1003 return ICE_ERR_DOES_NOT_EXIST;
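/* Illustrative usage sketch (not part of the driver): a caller locates a TLV
 * and then reads its value words, which start two words past the returned TLV
 * pointer (after the type and length words). module_type is a placeholder.
 *
 *    u16 tlv, tlv_len, word;
 *
 *    if (!ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, module_type) && tlv_len)
 *        status = ice_read_sr_word(hw, tlv + 2, &word);
 */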
1007 * ice_copy_rxq_ctx_to_hw
1008 * @hw: pointer to the hardware structure
1009 * @ice_rxq_ctx: pointer to the rxq context
1010 * @rxq_index: the index of the Rx queue
1012 * Copies rxq context from dense structure to HW register space
1014 static enum ice_status
1015 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1020 return ICE_ERR_BAD_PTR;
1022 if (rxq_index > QRX_CTRL_MAX_INDEX)
1023 return ICE_ERR_PARAM;
1025 /* Copy each dword separately to HW */
1026 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1027 wr32(hw, QRX_CONTEXT(i, rxq_index),
1028 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1030 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1031 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1037 /* LAN Rx Queue Context */
1038 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1039 /* Field Width LSB */
1040 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1041 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1042 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1043 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1044 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1045 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1046 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1047 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1048 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1049 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1050 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1051 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1052 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1053 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1054 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1055 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1056 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1057 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1058 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1059 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
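/* Each ICE_CTX_STORE entry gives a field's width in bits and its LSB offset
 * within the dense context image; ice_set_ctx() uses this table to pack the
 * sparse ice_rlan_ctx structure into the buffer written to hardware. For
 * example, qlen occupies bits 89..101, i.e. it straddles the third and
 * fourth dwords of the image.
 */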
1065 * @hw: pointer to the hardware structure
1066 * @rlan_ctx: pointer to the rxq context
1067 * @rxq_index: the index of the Rx queue
1069 * Converts rxq context from sparse to dense structure and then writes
1070 * it to HW register space and enables the hardware to prefetch descriptors
1071 * instead of only fetching them on demand
1074 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1077 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1080 return ICE_ERR_BAD_PTR;
1082 rlan_ctx->prefena = 1;
1084 ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1085 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
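/* Minimal programming sketch (illustrative only; the field values and the
 * 128 byte unit conversions shown are assumptions, not taken from this file):
 *
 *    struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *    rlan_ctx.base = ring_dma >> 7;
 *    rlan_ctx.qlen = nb_desc;
 *    rlan_ctx.dbuf = buf_len >> 7;
 *    status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */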
1090 * @hw: pointer to the hardware structure
1091 * @rxq_index: the index of the Rx queue to clear
1093 * Clears rxq context in HW register space
1095 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1099 if (rxq_index > QRX_CTRL_MAX_INDEX)
1100 return ICE_ERR_PARAM;
1102 /* Clear each dword register separately */
1103 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1104 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1109 /* LAN Tx Queue Context */
1110 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1111 /* Field Width LSB */
1112 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1113 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1114 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1115 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1116 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1117 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1118 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1119 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1120 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1121 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1122 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1123 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1124 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1125 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1126 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1127 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1128 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1129 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1130 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1131 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1132 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1133 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1134 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1135 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1136 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1137 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1138 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1139 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
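/* Note: unlike the Rx queue context, the Tx LAN queue context is not written
 * to registers here; callers are expected to pack it with ice_set_ctx() and
 * this table, typically into the buffer of the Add Tx LAN Queues admin
 * command (the table is non-static for that purpose).
 */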
1144 * ice_copy_tx_cmpltnq_ctx_to_hw
1145 * @hw: pointer to the hardware structure
1146 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1147 * @tx_cmpltnq_index: the index of the completion queue
1149 * Copies Tx completion queue context from dense structure to HW register space
1151 static enum ice_status
1152 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1153 u32 tx_cmpltnq_index)
1157 if (!ice_tx_cmpltnq_ctx)
1158 return ICE_ERR_BAD_PTR;
1160 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1161 return ICE_ERR_PARAM;
1163 /* Copy each dword separately to HW */
1164 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1165 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1166 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1168 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1169 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1175 /* LAN Tx Completion Queue Context */
1176 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1177 /* Field Width LSB */
1178 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1179 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1180 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1181 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1182 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1183 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1184 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1185 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1186 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1187 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1192 * ice_write_tx_cmpltnq_ctx
1193 * @hw: pointer to the hardware structure
1194 * @tx_cmpltnq_ctx: pointer to the completion queue context
1195 * @tx_cmpltnq_index: the index of the completion queue
1197 * Converts completion queue context from sparse to dense structure and then
1198 * writes it to HW register space
1201 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1202 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1203 u32 tx_cmpltnq_index)
1205 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1207 ice_set_ctx((u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1208 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1212 * ice_clear_tx_cmpltnq_ctx
1213 * @hw: pointer to the hardware structure
1214 * @tx_cmpltnq_index: the index of the completion queue to clear
1216 * Clears Tx completion queue context in HW register space
1219 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1223 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1224 return ICE_ERR_PARAM;
1226 /* Clear each dword register separately */
1227 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1228 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1234 * ice_copy_tx_drbell_q_ctx_to_hw
1235 * @hw: pointer to the hardware structure
1236 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1237 * @tx_drbell_q_index: the index of the doorbell queue
1239 * Copies doorbell queue context from dense structure to HW register space
1241 static enum ice_status
1242 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1243 u32 tx_drbell_q_index)
1247 if (!ice_tx_drbell_q_ctx)
1248 return ICE_ERR_BAD_PTR;
1250 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1251 return ICE_ERR_PARAM;
1253 /* Copy each dword separately to HW */
1254 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1255 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1256 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1258 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1259 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1265 /* LAN Tx Doorbell Queue Context info */
1266 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1267 /* Field Width LSB */
1268 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1269 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1270 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1271 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1272 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1273 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1274 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1275 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1276 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1277 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1278 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1283 * ice_write_tx_drbell_q_ctx
1284 * @hw: pointer to the hardware structure
1285 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1286 * @tx_drbell_q_index: the index of the doorbell queue
1288 * Converts doorbell queue context from sparse to dense structure and then
1289 * writes it to HW register space
1292 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1293 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1294 u32 tx_drbell_q_index)
1296 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1298 ice_set_ctx((u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info);
1299 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1303 * ice_clear_tx_drbell_q_ctx
1304 * @hw: pointer to the hardware structure
1305 * @tx_drbell_q_index: the index of the doorbell queue to clear
1307 * Clears doorbell queue context in HW register space
1310 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1314 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1315 return ICE_ERR_PARAM;
1317 /* Clear each dword register separately */
1318 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1319 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1324 /* FW Admin Queue command wrappers */
1327 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1328 * @hw: pointer to the HW struct
1329 * @desc: descriptor describing the command
1330 * @buf: buffer to use for indirect commands (NULL for direct commands)
1331 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1332 * @cd: pointer to command details structure
1334 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1337 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1338 u16 buf_size, struct ice_sq_cd *cd)
1340 return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
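/* Typical direct (no buffer) command pattern used throughout this file,
 * shown here for illustration only (mirrors ice_clear_pf_cfg() above):
 *
 *    struct ice_aq_desc desc;
 *
 *    ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
 *    return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */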
1345 * @hw: pointer to the HW struct
1346 * @cd: pointer to command details structure or NULL
1348 * Get the firmware version (0x0001) from the admin queue commands
1350 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1352 struct ice_aqc_get_ver *resp;
1353 struct ice_aq_desc desc;
1354 enum ice_status status;
1356 resp = &desc.params.get_ver;
1358 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1360 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1363 hw->fw_branch = resp->fw_branch;
1364 hw->fw_maj_ver = resp->fw_major;
1365 hw->fw_min_ver = resp->fw_minor;
1366 hw->fw_patch = resp->fw_patch;
1367 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1368 hw->api_branch = resp->api_branch;
1369 hw->api_maj_ver = resp->api_major;
1370 hw->api_min_ver = resp->api_minor;
1371 hw->api_patch = resp->api_patch;
1378 * ice_aq_send_driver_ver
1379 * @hw: pointer to the HW struct
1380 * @dv: driver's major, minor version
1381 * @cd: pointer to command details structure or NULL
1383 * Send the driver version (0x0002) to the firmware
1386 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1387 struct ice_sq_cd *cd)
1389 struct ice_aqc_driver_ver *cmd;
1390 struct ice_aq_desc desc;
1393 cmd = &desc.params.driver_ver;
1396 return ICE_ERR_PARAM;
1398 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1400 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1401 cmd->major_ver = dv->major_ver;
1402 cmd->minor_ver = dv->minor_ver;
1403 cmd->build_ver = dv->build_ver;
1404 cmd->subbuild_ver = dv->subbuild_ver;
1407 while (len < sizeof(dv->driver_string) &&
1408 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1411 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1416 * @hw: pointer to the HW struct
1417 * @unloading: is the driver unloading itself
1419 * Tell the Firmware that we're shutting down the AdminQ and whether
1420 * or not the driver is unloading as well (0x0003).
1422 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1424 struct ice_aqc_q_shutdown *cmd;
1425 struct ice_aq_desc desc;
1427 cmd = &desc.params.q_shutdown;
1429 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1432 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1434 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1439 * @hw: pointer to the HW struct
1441 * @access: access type
1442 * @sdp_number: resource number
1443 * @timeout: the maximum time in ms that the driver may hold the resource
1444 * @cd: pointer to command details structure or NULL
1446 * Requests common resource using the admin queue commands (0x0008).
1447 * When attempting to acquire the Global Config Lock, the driver can
1448 * learn of three states:
1449 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1450 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1451 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1452 * successfully downloaded the package; the driver does
1453 * not have to download the package and can continue
1456 * Note that if the caller is in an acquire-lock, perform-action, release-lock
1457 * phase of operation, it is possible that the FW may detect a timeout and issue
1458 * a CORER. In this case, the driver will receive a CORER interrupt and will
1459 * have to determine its cause. The calling thread that is handling this flow
1460 * will likely get an error propagated back to it indicating the Download
1461 * Package, Update Package or the Release Resource AQ commands timed out.
1463 static enum ice_status
1464 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1465 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1466 struct ice_sq_cd *cd)
1468 struct ice_aqc_req_res *cmd_resp;
1469 struct ice_aq_desc desc;
1470 enum ice_status status;
1472 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1474 cmd_resp = &desc.params.res_owner;
1476 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1478 cmd_resp->res_id = CPU_TO_LE16(res);
1479 cmd_resp->access_type = CPU_TO_LE16(access);
1480 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1481 cmd_resp->timeout = CPU_TO_LE32(*timeout);
1484 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1486 /* The completion specifies the maximum time in ms that the driver
1487 * may hold the resource in the Timeout field.
1490 /* Global config lock response utilizes an additional status field.
1492 * If the Global config lock resource is held by some other driver, the
1493 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1494 * and the timeout field indicates the maximum time the current owner
1495 * of the resource has to free it.
1497 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1498 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1499 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1501 } else if (LE16_TO_CPU(cmd_resp->status) ==
1502 ICE_AQ_RES_GLBL_IN_PROG) {
1503 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1504 return ICE_ERR_AQ_ERROR;
1505 } else if (LE16_TO_CPU(cmd_resp->status) ==
1506 ICE_AQ_RES_GLBL_DONE) {
1507 return ICE_ERR_AQ_NO_WORK;
1510 /* invalid FW response, force a timeout immediately */
1512 return ICE_ERR_AQ_ERROR;
1515 /* If the resource is held by some other driver, the command completes
1516 * with a busy return value and the timeout field indicates the maximum
1517 * time the current owner of the resource has to free it.
1519 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1520 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1526 * ice_aq_release_res
1527 * @hw: pointer to the HW struct
1529 * @sdp_number: resource number
1530 * @cd: pointer to command details structure or NULL
1532 * Release a common resource using the admin queue commands (0x0009).
1534 static enum ice_status
1535 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1536 struct ice_sq_cd *cd)
1538 struct ice_aqc_req_res *cmd;
1539 struct ice_aq_desc desc;
1541 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1543 cmd = &desc.params.res_owner;
1545 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1547 cmd->res_id = CPU_TO_LE16(res);
1548 cmd->res_number = CPU_TO_LE32(sdp_number);
1550 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1555 * @hw: pointer to the HW structure
1557 * @access: access type (read or write)
1558 * @timeout: timeout in milliseconds
1560 * This function will attempt to acquire the ownership of a resource.
1563 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1564 enum ice_aq_res_access_type access, u32 timeout)
1566 #define ICE_RES_POLLING_DELAY_MS 10
1567 u32 delay = ICE_RES_POLLING_DELAY_MS;
1568 u32 time_left = timeout;
1569 enum ice_status status;
1571 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1573 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1575 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1576 * previously acquired the resource and performed any necessary updates;
1577 * in this case the caller does not obtain the resource and has no
1578 * further work to do.
1580 if (status == ICE_ERR_AQ_NO_WORK)
1581 goto ice_acquire_res_exit;
1584 ice_debug(hw, ICE_DBG_RES,
1585 "resource %d acquire type %d failed.\n", res, access);
1587 /* If necessary, poll until the current lock owner times out */
1588 timeout = time_left;
1589 while (status && timeout && time_left) {
1590 ice_msec_delay(delay, true);
1591 timeout = (timeout > delay) ? timeout - delay : 0;
1592 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1594 if (status == ICE_ERR_AQ_NO_WORK)
1595 /* lock free, but no work to do */
1602 if (status && status != ICE_ERR_AQ_NO_WORK)
1603 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1605 ice_acquire_res_exit:
1606 if (status == ICE_ERR_AQ_NO_WORK) {
1607 if (access == ICE_RES_WRITE)
1608 ice_debug(hw, ICE_DBG_RES,
1609 "resource indicates no work to do.\n");
1611 ice_debug(hw, ICE_DBG_RES,
1612 "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1619 * @hw: pointer to the HW structure
1622 * This function will release a resource using the proper Admin Command.
1624 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1626 enum ice_status status;
1627 u32 total_delay = 0;
1629 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1631 status = ice_aq_release_res(hw, res, 0, NULL);
1633 /* there are some rare cases when trying to release the resource
1634 * results in an admin queue timeout, so handle them correctly
1636 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1637 (total_delay < hw->adminq.sq_cmd_timeout)) {
1638 ice_msec_delay(1, true);
1639 status = ice_aq_release_res(hw, res, 0, NULL);
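/* Illustrative acquire/use/release pattern (sketch only; the NVM resource ID
 * and timeout constants are assumed to come from the NVM code, not this file):
 *
 *    if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, ICE_NVM_TIMEOUT)) {
 *        ... read shadow RAM words ...
 *        ice_release_res(hw, ICE_NVM_RES_ID);
 *    }
 */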
1645 * ice_aq_alloc_free_res - command to allocate/free resources
1646 * @hw: pointer to the HW struct
1647 * @num_entries: number of resource entries in buffer
1648 * @buf: Indirect buffer to hold data parameters and response
1649 * @buf_size: size of buffer for indirect commands
1650 * @opc: pass in the command opcode
1651 * @cd: pointer to command details structure or NULL
1653 * Helper function to allocate/free resources using the admin queue commands
1656 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1657 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1658 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1660 struct ice_aqc_alloc_free_res_cmd *cmd;
1661 struct ice_aq_desc desc;
1663 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1665 cmd = &desc.params.sw_res_ctrl;
1668 return ICE_ERR_PARAM;
1670 if (buf_size < (num_entries * sizeof(buf->elem[0])))
1671 return ICE_ERR_PARAM;
1673 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1675 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1677 cmd->num_entries = CPU_TO_LE16(num_entries);
1679 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1683 * ice_alloc_hw_res - allocate resource
1684 * @hw: pointer to the HW struct
1685 * @type: type of resource
1686 * @num: number of resources to allocate
1687 * @btm: allocate from bottom
1688 * @res: pointer to array that will receive the resources
1691 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1693 struct ice_aqc_alloc_free_res_elem *buf;
1694 enum ice_status status;
1697 buf_len = ice_struct_size(buf, elem, num - 1);
1698 buf = (struct ice_aqc_alloc_free_res_elem *)
1699 ice_malloc(hw, buf_len);
1701 return ICE_ERR_NO_MEMORY;
1703 /* Prepare buffer to allocate resource. */
1704 buf->num_elems = CPU_TO_LE16(num);
1705 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1706 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1708 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1710 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1711 ice_aqc_opc_alloc_res, NULL);
1713 goto ice_alloc_res_exit;
1715 ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
1716 ICE_NONDMA_TO_NONDMA);
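/* Illustrative sketch (hypothetical resource type) of allocating a single
 * HW resource and later returning it to the pool:
 *
 *    u16 res_id;
 *
 *    if (!ice_alloc_hw_res(hw, type, 1, false, &res_id)) {
 *        ... use res_id ...
 *        ice_free_hw_res(hw, type, 1, &res_id);
 *    }
 */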
1724 * ice_free_hw_res - free allocated HW resource
1725 * @hw: pointer to the HW struct
1726 * @type: type of resource to free
1727 * @num: number of resources
1728 * @res: pointer to array that contains the resources to free
1731 ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1733 struct ice_aqc_alloc_free_res_elem *buf;
1734 enum ice_status status;
1737 buf_len = ice_struct_size(buf, elem, num - 1);
1738 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1740 return ICE_ERR_NO_MEMORY;
1742 /* Prepare buffer to free resource. */
1743 buf->num_elems = CPU_TO_LE16(num);
1744 buf->res_type = CPU_TO_LE16(type);
1745 ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
1746 ICE_NONDMA_TO_NONDMA);
1748 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1749 ice_aqc_opc_free_res, NULL);
1751 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1758 * ice_get_num_per_func - determine number of resources per PF
1759 * @hw: pointer to the HW structure
1760 * @max: value to be evenly split between each PF
1762 * Determine the number of valid functions by going through the bitmap returned
1763 * from parsing capabilities and use this to calculate the number of resources
1764 * per PF based on the max value passed in.
1766 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1770 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1771 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
1772 ICE_CAPS_VALID_FUNCS_M);
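/* Worked example: with a valid_functions bitmap of 0x3 (two PFs) and
 * max = 8, the value is split evenly and each PF is granted 8 / 2 = 4
 * resources.
 */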
1781 * ice_parse_caps - parse function/device capabilities
1782 * @hw: pointer to the HW struct
1783 * @buf: pointer to a buffer containing function/device capability records
1784 * @cap_count: number of capability records in the list
1785 * @opc: type of capabilities list to parse
1787 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1790 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1791 enum ice_adminq_opc opc)
1793 struct ice_aqc_list_caps_elem *cap_resp;
1794 struct ice_hw_func_caps *func_p = NULL;
1795 struct ice_hw_dev_caps *dev_p = NULL;
1796 struct ice_hw_common_caps *caps;
1803 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1805 if (opc == ice_aqc_opc_list_dev_caps) {
1806 dev_p = &hw->dev_caps;
1807 caps = &dev_p->common_cap;
1809 } else if (opc == ice_aqc_opc_list_func_caps) {
1810 func_p = &hw->func_caps;
1811 caps = &func_p->common_cap;
1812 prefix = "func cap";
1814 ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1818 for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1819 u32 logical_id = LE32_TO_CPU(cap_resp->logical_id);
1820 u32 phys_id = LE32_TO_CPU(cap_resp->phys_id);
1821 u32 number = LE32_TO_CPU(cap_resp->number);
1822 u16 cap = LE16_TO_CPU(cap_resp->cap);
1825 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1826 caps->valid_functions = number;
1827 ice_debug(hw, ICE_DBG_INIT,
1828 "%s: valid_functions (bitmap) = %d\n", prefix,
1829 caps->valid_functions);
1831 /* store func count for resource management purposes */
1833 dev_p->num_funcs = ice_hweight32(number);
1835 case ICE_AQC_CAPS_VSI:
1837 dev_p->num_vsi_allocd_to_host = number;
1838 ice_debug(hw, ICE_DBG_INIT,
1839 "%s: num_vsi_allocd_to_host = %d\n",
1841 dev_p->num_vsi_allocd_to_host);
1842 } else if (func_p) {
1843 func_p->guar_num_vsi =
1844 ice_get_num_per_func(hw, ICE_MAX_VSI);
1845 ice_debug(hw, ICE_DBG_INIT,
1846 "%s: guar_num_vsi (fw) = %d\n",
1848 ice_debug(hw, ICE_DBG_INIT,
1849 "%s: guar_num_vsi = %d\n",
1850 prefix, func_p->guar_num_vsi);
1853 case ICE_AQC_CAPS_DCB:
1854 caps->dcb = (number == 1);
1855 caps->active_tc_bitmap = logical_id;
1856 caps->maxtc = phys_id;
1857 ice_debug(hw, ICE_DBG_INIT,
1858 "%s: dcb = %d\n", prefix, caps->dcb);
1859 ice_debug(hw, ICE_DBG_INIT,
1860 "%s: active_tc_bitmap = %d\n", prefix,
1861 caps->active_tc_bitmap);
1862 ice_debug(hw, ICE_DBG_INIT,
1863 "%s: maxtc = %d\n", prefix, caps->maxtc);
1865 case ICE_AQC_CAPS_RSS:
1866 caps->rss_table_size = number;
1867 caps->rss_table_entry_width = logical_id;
1868 ice_debug(hw, ICE_DBG_INIT,
1869 "%s: rss_table_size = %d\n", prefix,
1870 caps->rss_table_size);
1871 ice_debug(hw, ICE_DBG_INIT,
1872 "%s: rss_table_entry_width = %d\n", prefix,
1873 caps->rss_table_entry_width);
1875 case ICE_AQC_CAPS_RXQS:
1876 caps->num_rxq = number;
1877 caps->rxq_first_id = phys_id;
1878 ice_debug(hw, ICE_DBG_INIT,
1879 "%s: num_rxq = %d\n", prefix,
1881 ice_debug(hw, ICE_DBG_INIT,
1882 "%s: rxq_first_id = %d\n", prefix,
1883 caps->rxq_first_id);
1885 case ICE_AQC_CAPS_TXQS:
1886 caps->num_txq = number;
1887 caps->txq_first_id = phys_id;
1888 ice_debug(hw, ICE_DBG_INIT,
1889 "%s: num_txq = %d\n", prefix,
1891 ice_debug(hw, ICE_DBG_INIT,
1892 "%s: txq_first_id = %d\n", prefix,
1893 caps->txq_first_id);
1895 case ICE_AQC_CAPS_MSIX:
1896 caps->num_msix_vectors = number;
1897 caps->msix_vector_first_id = phys_id;
1898 ice_debug(hw, ICE_DBG_INIT,
1899 "%s: num_msix_vectors = %d\n", prefix,
1900 caps->num_msix_vectors);
1901 ice_debug(hw, ICE_DBG_INIT,
1902 "%s: msix_vector_first_id = %d\n", prefix,
1903 caps->msix_vector_first_id);
1905 case ICE_AQC_CAPS_FD:
1910 dev_p->num_flow_director_fltr = number;
1911 ice_debug(hw, ICE_DBG_INIT,
1912 "%s: num_flow_director_fltr = %d\n",
1914 dev_p->num_flow_director_fltr);
1917 reg_val = rd32(hw, GLQF_FD_SIZE);
1918 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
1919 GLQF_FD_SIZE_FD_GSIZE_S;
1920 func_p->fd_fltr_guar =
1921 ice_get_num_per_func(hw, val);
1922 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
1923 GLQF_FD_SIZE_FD_BSIZE_S;
1924 func_p->fd_fltr_best_effort = val;
1925 ice_debug(hw, ICE_DBG_INIT,
1926 "%s: fd_fltr_guar = %d\n",
1927 prefix, func_p->fd_fltr_guar);
1928 ice_debug(hw, ICE_DBG_INIT,
1929 "%s: fd_fltr_best_effort = %d\n",
1930 prefix, func_p->fd_fltr_best_effort);
1934 case ICE_AQC_CAPS_MAX_MTU:
1935 caps->max_mtu = number;
1936 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1937 prefix, caps->max_mtu);
1940 ice_debug(hw, ICE_DBG_INIT,
1941 "%s: unknown capability[%d]: 0x%x\n", prefix,
1947 /* Re-calculate capabilities that are dependent on the number of
1948 * physical ports; i.e. some features are not supported or function
1949 * differently on devices with more than 4 ports.
1951 if (hw->dev_caps.num_funcs > 4) {
1952 /* Max 4 TCs per port */
1954 ice_debug(hw, ICE_DBG_INIT,
1955 "%s: maxtc = %d (based on #ports)\n", prefix,
1961 * ice_aq_discover_caps - query function/device capabilities
1962 * @hw: pointer to the HW struct
1963 * @buf: a virtual buffer to hold the capabilities
1964 * @buf_size: Size of the virtual buffer
1965 * @cap_count: cap count needed if AQ err==ENOMEM
1966 * @opc: capabilities type to discover - pass in the command opcode
1967 * @cd: pointer to command details structure or NULL
1969 * Get the function(0x000a)/device(0x000b) capabilities description from firmware and store it in the HW structure.
1972 static enum ice_status
1973 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
1974 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1976 struct ice_aqc_list_caps *cmd;
1977 struct ice_aq_desc desc;
1978 enum ice_status status;
1980 cmd = &desc.params.get_cap;
1982 if (opc != ice_aqc_opc_list_func_caps &&
1983 opc != ice_aqc_opc_list_dev_caps)
1984 return ICE_ERR_PARAM;
1986 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1988 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1990 ice_parse_caps(hw, buf, LE32_TO_CPU(cmd->count), opc);
1991 else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
1992 *cap_count = LE32_TO_CPU(cmd->count);
1997 * ice_discover_caps - get info about the HW
1998 * @hw: pointer to the hardware structure
1999 * @opc: capabilities type to discover - pass in the command opcode
2001 static enum ice_status
2002 ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
2004 enum ice_status status;
2009 /* The driver doesn't know how many capabilities the device will return
2010 * so the buffer size required isn't known ahead of time. The driver
2011 * starts with cbuf_len and if this turns out to be insufficient, the
2012 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
2013 * The driver then allocates the buffer based on the count and retries
2014 * the operation. So it follows that the retry count is 2.
2016 #define ICE_GET_CAP_BUF_COUNT 40
2017 #define ICE_GET_CAP_RETRY_COUNT 2
2019 cap_count = ICE_GET_CAP_BUF_COUNT;
2020 retries = ICE_GET_CAP_RETRY_COUNT;
2025 cbuf_len = (u16)(cap_count *
2026 sizeof(struct ice_aqc_list_caps_elem));
2027 cbuf = ice_malloc(hw, cbuf_len);
2029 return ICE_ERR_NO_MEMORY;
2031 status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
2035 if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
2038 /* If ENOMEM is returned, try again with bigger buffer */
2039 } while (--retries);
2045 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2046 * @hw: pointer to the hardware structure
2048 void ice_set_safe_mode_caps(struct ice_hw *hw)
2050 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2051 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2052 u32 valid_func, rxq_first_id, txq_first_id;
2053 u32 msix_vector_first_id, max_mtu;
2056 /* cache some func_caps values that should be restored after memset */
2057 valid_func = func_caps->common_cap.valid_functions;
2058 txq_first_id = func_caps->common_cap.txq_first_id;
2059 rxq_first_id = func_caps->common_cap.rxq_first_id;
2060 msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
2061 max_mtu = func_caps->common_cap.max_mtu;
2063 /* unset func capabilities */
2064 memset(func_caps, 0, sizeof(*func_caps));
2066 /* restore cached values */
2067 func_caps->common_cap.valid_functions = valid_func;
2068 func_caps->common_cap.txq_first_id = txq_first_id;
2069 func_caps->common_cap.rxq_first_id = rxq_first_id;
2070 func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2071 func_caps->common_cap.max_mtu = max_mtu;
2073 /* one Tx and one Rx queue in safe mode */
2074 func_caps->common_cap.num_rxq = 1;
2075 func_caps->common_cap.num_txq = 1;
2077 /* two MSIX vectors, one for traffic and one for misc causes */
2078 func_caps->common_cap.num_msix_vectors = 2;
2079 func_caps->guar_num_vsi = 1;
2081 /* cache some dev_caps values that should be restored after memset */
2082 valid_func = dev_caps->common_cap.valid_functions;
2083 txq_first_id = dev_caps->common_cap.txq_first_id;
2084 rxq_first_id = dev_caps->common_cap.rxq_first_id;
2085 msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
2086 max_mtu = dev_caps->common_cap.max_mtu;
2087 num_funcs = dev_caps->num_funcs;
2089 /* unset dev capabilities */
2090 memset(dev_caps, 0, sizeof(*dev_caps));
2092 /* restore cached values */
2093 dev_caps->common_cap.valid_functions = valid_func;
2094 dev_caps->common_cap.txq_first_id = txq_first_id;
2095 dev_caps->common_cap.rxq_first_id = rxq_first_id;
2096 dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2097 dev_caps->common_cap.max_mtu = max_mtu;
2098 dev_caps->num_funcs = num_funcs;
2100 /* one Tx and one Rx queue per function in safe mode */
2101 dev_caps->common_cap.num_rxq = num_funcs;
2102 dev_caps->common_cap.num_txq = num_funcs;
2104 /* two MSIX vectors per function */
2105 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2109 * ice_get_caps - get info about the HW
2110 * @hw: pointer to the hardware structure
2112 enum ice_status ice_get_caps(struct ice_hw *hw)
2114 enum ice_status status;
2116 status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
2118 status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
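/* Usage sketch (illustrative only, not part of the original file): a typical
 * init-time call, assuming an already-initialized struct ice_hw *hw. On
 * success the parsed capabilities are available in hw->dev_caps and
 * hw->func_caps.
 *
 *	status = ice_get_caps(hw);
 *	if (status)
 *		return status;
 *	ice_debug(hw, ICE_DBG_INIT, "num_rxq = %d\n",
 *		  hw->func_caps.common_cap.num_rxq);
 */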
2124 * ice_aq_manage_mac_write - manage MAC address write command
2125 * @hw: pointer to the HW struct
2126 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2127 * @flags: flags to control write behavior
2128 * @cd: pointer to command details structure or NULL
2130 * This function is used to write MAC address to the NVM (0x0108).
2133 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2134 struct ice_sq_cd *cd)
2136 struct ice_aqc_manage_mac_write *cmd;
2137 struct ice_aq_desc desc;
2139 cmd = &desc.params.mac_write;
2140 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2144 /* Prep values for flags, sah, sal */
2145 cmd->sah = HTONS(*((const u16 *)mac_addr));
2146 cmd->sal = HTONL(*((const u32 *)(mac_addr + 2)));
2148 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2152 * ice_aq_clear_pxe_mode
2153 * @hw: pointer to the HW struct
2155 * Tell the firmware that the driver is taking over from PXE (0x0110).
2157 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2159 struct ice_aq_desc desc;
2161 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2162 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2164 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2168 * ice_clear_pxe_mode - clear pxe operations mode
2169 * @hw: pointer to the HW struct
2171 * Make sure all PXE mode settings are cleared, including things
2172 * like descriptor fetch/write-back mode.
2174 void ice_clear_pxe_mode(struct ice_hw *hw)
2176 if (ice_check_sq_alive(hw, &hw->adminq))
2177 ice_aq_clear_pxe_mode(hw);
2181 * ice_get_link_speed_based_on_phy_type - returns link speed
2182 * @phy_type_low: lower part of phy_type
2183 * @phy_type_high: higher part of phy_type
2185 * This helper function will convert an entry in PHY type structure
2186 * [phy_type_low, phy_type_high] to its corresponding link speed.
2187 * Note: In the structure of [phy_type_low, phy_type_high], there should
2188 * be one bit set, as this function will convert one PHY type to its corresponding link speed.
2190 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2191 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2194 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2196 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2197 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2199 switch (phy_type_low) {
2200 case ICE_PHY_TYPE_LOW_100BASE_TX:
2201 case ICE_PHY_TYPE_LOW_100M_SGMII:
2202 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2204 case ICE_PHY_TYPE_LOW_1000BASE_T:
2205 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2206 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2207 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2208 case ICE_PHY_TYPE_LOW_1G_SGMII:
2209 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2211 case ICE_PHY_TYPE_LOW_2500BASE_T:
2212 case ICE_PHY_TYPE_LOW_2500BASE_X:
2213 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2214 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2216 case ICE_PHY_TYPE_LOW_5GBASE_T:
2217 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2218 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2220 case ICE_PHY_TYPE_LOW_10GBASE_T:
2221 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2222 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2223 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2224 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2225 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2226 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2227 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2229 case ICE_PHY_TYPE_LOW_25GBASE_T:
2230 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2231 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2232 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2233 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2234 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2235 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2236 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2237 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2238 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2239 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2240 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2242 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2243 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2244 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2245 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2246 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2247 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2248 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2250 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2251 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2252 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2253 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2254 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2255 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2256 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2257 case ICE_PHY_TYPE_LOW_50G_AUI2:
2258 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2259 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2260 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2261 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2262 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2263 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2264 case ICE_PHY_TYPE_LOW_50G_AUI1:
2265 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2267 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2268 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2269 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2270 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2271 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2272 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2273 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2274 case ICE_PHY_TYPE_LOW_100G_AUI4:
2275 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2276 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2277 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2278 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2279 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2280 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2283 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2287 switch (phy_type_high) {
2288 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2289 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2290 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2291 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2292 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2293 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2296 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2300 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2301 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2302 return ICE_AQ_LINK_SPEED_UNKNOWN;
2303 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2304 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2305 return ICE_AQ_LINK_SPEED_UNKNOWN;
2306 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2307 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2308 return speed_phy_type_low;
2310 return speed_phy_type_high;
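/* Example (illustrative only): exactly one PHY type bit may be set across
 * [phy_type_low, phy_type_high] for a speed to be resolved; zero or multiple
 * bits yield ICE_AQ_LINK_SPEED_UNKNOWN.
 *
 *	u16 speed;
 *
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR, 0);
 *	(speed is ICE_AQ_LINK_SPEED_25GB)
 *
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR |
 *						     ICE_PHY_TYPE_LOW_10GBASE_T, 0);
 *	(speed is ICE_AQ_LINK_SPEED_UNKNOWN)
 */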
2314 * ice_update_phy_type
2315 * @phy_type_low: pointer to the lower part of phy_type
2316 * @phy_type_high: pointer to the higher part of phy_type
2317 * @link_speeds_bitmap: targeted link speeds bitmap
2319 * Note: For the link_speeds_bitmap structure, you can check it at
2320 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2321 * link_speeds_bitmap that includes multiple speeds.
2323 * Each entry in this [phy_type_low, phy_type_high] structure will
2324 * represent a certain link speed. This helper function will turn on bits
2325 * in [phy_type_low, phy_type_high] structure based on the value of
2326 * link_speeds_bitmap input parameter.
2329 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2330 u16 link_speeds_bitmap)
2337 /* We first check with low part of phy_type */
2338 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2339 pt_low = BIT_ULL(index);
2340 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2342 if (link_speeds_bitmap & speed)
2343 *phy_type_low |= BIT_ULL(index);
2346 /* We then check with high part of phy_type */
2347 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2348 pt_high = BIT_ULL(index);
2349 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2351 if (link_speeds_bitmap & speed)
2352 *phy_type_high |= BIT_ULL(index);
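/* Usage sketch (illustrative only): build the PHY type masks for a set of
 * requested speeds before filling a set-PHY-config request. The cfg local is
 * assumed to be a struct ice_aqc_set_phy_cfg_data.
 *
 *	u64 phy_type_low = 0, phy_type_high = 0;
 *
 *	ice_update_phy_type(&phy_type_low, &phy_type_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	cfg.phy_type_low = CPU_TO_LE64(phy_type_low);
 *	cfg.phy_type_high = CPU_TO_LE64(phy_type_high);
 */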
2357 * ice_aq_set_phy_cfg
2358 * @hw: pointer to the HW struct
2359 * @pi: port info structure of the interested logical port
2360 * @cfg: structure with PHY configuration data to be set
2361 * @cd: pointer to command details structure or NULL
2363 * Set the various PHY configuration parameters supported on the Port.
2364 * One or more of the Set PHY config parameters may be ignored in an MFP
2365 * mode as the PF may not have the privilege to set some of the PHY Config
2366 * parameters. This status will be indicated by the command response (0x0601).
2369 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2370 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2372 struct ice_aq_desc desc;
2373 enum ice_status status;
2376 return ICE_ERR_PARAM;
2378 /* Ensure that only valid bits of cfg->caps can be turned on. */
2379 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2380 ice_debug(hw, ICE_DBG_PHY,
2381 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2384 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2387 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2388 desc.params.set_phy.lport_num = pi->lport;
2389 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2391 ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
2392 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2393 ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
2394 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2395 ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
2396 ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
2397 cfg->low_power_ctrl);
2398 ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
2399 ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
2400 ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
2402 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2405 pi->phy.curr_user_phy_cfg = *cfg;
2411 * ice_update_link_info - update status of the HW network link
2412 * @pi: port info structure of the interested logical port
2414 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2416 struct ice_link_status *li;
2417 enum ice_status status;
2420 return ICE_ERR_PARAM;
2422 li = &pi->phy.link_info;
2424 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2428 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2429 struct ice_aqc_get_phy_caps_data *pcaps;
2433 pcaps = (struct ice_aqc_get_phy_caps_data *)
2434 ice_malloc(hw, sizeof(*pcaps));
2436 return ICE_ERR_NO_MEMORY;
2438 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2440 if (status == ICE_SUCCESS)
2441 ice_memcpy(li->module_type, &pcaps->module_type,
2442 sizeof(li->module_type),
2443 ICE_NONDMA_TO_NONDMA);
2445 ice_free(hw, pcaps);
2452 * ice_cache_phy_user_req
2453 * @pi: port information structure
2454 * @cache_data: PHY logging data
2455 * @cache_mode: PHY logging mode
2457 * Log the user request on (FC, FEC, SPEED) for later use.
2460 ice_cache_phy_user_req(struct ice_port_info *pi,
2461 struct ice_phy_cache_mode_data cache_data,
2462 enum ice_phy_cache_mode cache_mode)
2467 switch (cache_mode) {
2469 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2471 case ICE_SPEED_MODE:
2472 pi->phy.curr_user_speed_req =
2473 cache_data.data.curr_user_speed_req;
2476 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2484 * ice_caps_to_fc_mode
2485 * @caps: PHY capabilities
2487 * Convert PHY FC capabilities to ice FC mode
2489 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2491 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2492 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2495 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2496 return ICE_FC_TX_PAUSE;
2498 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2499 return ICE_FC_RX_PAUSE;
2505 * ice_caps_to_fec_mode
2506 * @caps: PHY capabilities
2507 * @fec_options: Link FEC options
2509 * Convert PHY FEC capabilities to ice FEC mode
2511 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2513 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2514 return ICE_FEC_AUTO;
2516 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2517 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2518 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2519 ICE_AQC_PHY_FEC_25G_KR_REQ))
2520 return ICE_FEC_BASER;
2522 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2523 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2524 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2527 return ICE_FEC_NONE;
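/* Example (illustrative only): translate previously retrieved PHY
 * capabilities into driver FC/FEC modes. pcaps is assumed to have been
 * filled by ice_aq_get_phy_caps().
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
 *						     pcaps->link_fec_options);
 */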
2532 * @pi: port information structure
2533 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2534 * @ena_auto_link_update: enable automatic link update
2536 * Set the requested flow control mode.
2539 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2541 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2542 struct ice_phy_cache_mode_data cache_data;
2543 struct ice_link_default_override_tlv tlv;
2544 struct ice_aqc_get_phy_caps_data *pcaps;
2545 enum ice_status status;
2546 u8 pause_mask = 0x0;
2550 return ICE_ERR_PARAM;
2552 *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
2554 /* Cache user FC request */
2555 cache_data.data.curr_user_fc_req = pi->fc.req_mode;
2556 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2558 switch (pi->fc.req_mode) {
2560 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2561 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2563 case ICE_FC_RX_PAUSE:
2564 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2566 case ICE_FC_TX_PAUSE:
2567 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2573 pcaps = (struct ice_aqc_get_phy_caps_data *)
2574 ice_malloc(hw, sizeof(*pcaps));
2576 return ICE_ERR_NO_MEMORY;
2578 /* Get the current PHY config */
2579 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2582 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2586 /* clear the old pause settings */
2587 cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2588 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2590 /* set the new capabilities */
2591 if (pi->fc.req_mode == ICE_FC_AUTO &&
2592 ice_fw_supports_link_override(hw)) {
2593 status = ice_get_link_default_override(&tlv, pi);
2597 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
2598 (tlv.options & ICE_LINK_OVERRIDE_EN))
2599 cfg.caps |= tlv.phy_config & ICE_LINK_OVERRIDE_PAUSE_M;
2601 cfg.caps |= pause_mask;
2604 /* If the capabilities have changed, then set the new config */
2605 if (cfg.caps != pcaps->caps) {
2606 int retry_count, retry_max = 10;
2608 /* Auto restart link so settings take effect */
2609 if (ena_auto_link_update)
2610 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2611 /* Copy over all the old settings */
2612 cfg.phy_type_high = pcaps->phy_type_high;
2613 cfg.phy_type_low = pcaps->phy_type_low;
2614 cfg.low_power_ctrl = pcaps->low_power_ctrl;
2615 cfg.eee_cap = pcaps->eee_cap;
2616 cfg.eeer_value = pcaps->eeer_value;
2617 cfg.link_fec_opt = pcaps->link_fec_options;
2619 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2621 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2625 /* Update the link info
2626 * It sometimes takes a really long time for link to
2627 * come back from the atomic reset. Thus, we wait a little bit.
2630 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2631 status = ice_update_link_info(pi);
2633 if (status == ICE_SUCCESS)
2636 ice_msec_delay(100, true);
2640 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2644 ice_free(hw, pcaps);
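/* Usage sketch (illustrative only): request symmetric flow control and let
 * the function restart the link so the change takes effect. pi is assumed to
 * be a valid port info structure.
 *
 *	u8 aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		ice_debug(pi->hw, ICE_DBG_LINK,
 *			  "set fc failed, aq_failures = %d\n", aq_failures);
 */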
2649 * ice_phy_caps_equals_cfg
2650 * @phy_caps: PHY capabilities
2651 * @phy_cfg: PHY configuration
2653 * Helper function to determine if PHY capabilities match the PHY configuration.
2657 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2658 struct ice_aqc_set_phy_cfg_data *phy_cfg)
2660 u8 caps_mask, cfg_mask;
2662 if (!phy_caps || !phy_cfg)
2665 /* These bits are not common between capabilities and configuration.
2666 * Do not use them to determine equality.
2668 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2669 ICE_AQC_PHY_EN_MOD_QUAL);
2670 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2672 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2673 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2674 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2675 phy_caps->low_power_ctrl != phy_cfg->low_power_ctrl ||
2676 phy_caps->eee_cap != phy_cfg->eee_cap ||
2677 phy_caps->eeer_value != phy_cfg->eeer_value ||
2678 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2685 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2686 * @caps: PHY ability structure to copy data from
2687 * @cfg: PHY configuration structure to copy data to
2689 * Helper function to copy AQC PHY get ability data to PHY set configuration data.
2693 ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
2694 struct ice_aqc_set_phy_cfg_data *cfg)
2699 cfg->phy_type_low = caps->phy_type_low;
2700 cfg->phy_type_high = caps->phy_type_high;
2701 cfg->caps = caps->caps;
2702 cfg->low_power_ctrl = caps->low_power_ctrl;
2703 cfg->eee_cap = caps->eee_cap;
2704 cfg->eeer_value = caps->eeer_value;
2705 cfg->link_fec_opt = caps->link_fec_options;
2709 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2710 * @cfg: PHY configuration data to set FEC mode
2711 * @fec: FEC mode to configure
2713 * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
2714 * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
2715 * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
2718 ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
2722 /* Clear RS bits, and AND BASE-R ability
2723 * bits and OR request bits.
2725 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2726 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2727 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2728 ICE_AQC_PHY_FEC_25G_KR_REQ;
2731 /* Clear BASE-R bits, and AND RS ability
2732 * bits and OR request bits.
2734 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2735 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2736 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2739 /* Clear all FEC option bits. */
2740 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2743 /* AND auto FEC bit, and all caps bits. */
2744 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
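/* Usage sketch (illustrative only): the typical sequence for changing FEC -
 * copy the current abilities into a config (which carries over the caps and
 * link_fec_options noted above), adjust the FEC request bits, then apply it.
 * pcaps, cfg and pi are assumed locals.
 *
 *	ice_copy_phy_caps_to_cfg(pcaps, &cfg);
 *	ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
 *	status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */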
2750 * ice_get_link_status - get status of the HW network link
2751 * @pi: port information structure
2752 * @link_up: pointer to bool (true/false = linkup/linkdown)
2754 * Variable link_up is true if link is up, false if link is down.
2755 * The variable link_up is invalid if status is non-zero. As a
2756 * result of this call, link status reporting becomes enabled.
2758 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2760 struct ice_phy_info *phy_info;
2761 enum ice_status status = ICE_SUCCESS;
2763 if (!pi || !link_up)
2764 return ICE_ERR_PARAM;
2766 phy_info = &pi->phy;
2768 if (phy_info->get_link_info) {
2769 status = ice_update_link_info(pi);
2772 ice_debug(pi->hw, ICE_DBG_LINK,
2773 "get link status error, status = %d\n",
2777 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
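/* Usage sketch (illustrative only): poll the link state; the first call also
 * enables link status reporting as described above.
 *
 *	bool link_up = false;
 *
 *	status = ice_get_link_status(pi, &link_up);
 *	if (!status && link_up)
 *		(link is up, safe to start traffic)
 */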
2783 * ice_aq_set_link_restart_an
2784 * @pi: pointer to the port information structure
2785 * @ena_link: if true: enable link, if false: disable link
2786 * @cd: pointer to command details structure or NULL
2788 * Sets up the link and restarts the Auto-Negotiation over the link.
2791 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2792 struct ice_sq_cd *cd)
2794 struct ice_aqc_restart_an *cmd;
2795 struct ice_aq_desc desc;
2797 cmd = &desc.params.restart_an;
2799 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2801 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2802 cmd->lport_num = pi->lport;
2804 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2806 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2808 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2812 * ice_aq_set_event_mask
2813 * @hw: pointer to the HW struct
2814 * @port_num: port number of the physical function
2815 * @mask: event mask to be set
2816 * @cd: pointer to command details structure or NULL
2818 * Set event mask (0x0613)
2821 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2822 struct ice_sq_cd *cd)
2824 struct ice_aqc_set_event_mask *cmd;
2825 struct ice_aq_desc desc;
2827 cmd = &desc.params.set_event_mask;
2829 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2831 cmd->lport_num = port_num;
2833 cmd->event_mask = CPU_TO_LE16(mask);
2834 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2838 * ice_aq_set_mac_loopback
2839 * @hw: pointer to the HW struct
2840 * @ena_lpbk: Enable or Disable loopback
2841 * @cd: pointer to command details structure or NULL
2843 * Enable/disable loopback on a given port
2846 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2848 struct ice_aqc_set_mac_lb *cmd;
2849 struct ice_aq_desc desc;
2851 cmd = &desc.params.set_mac_lb;
2853 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2855 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2857 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2861 * ice_aq_set_port_id_led
2862 * @pi: pointer to the port information
2863 * @is_orig_mode: is this LED set to original mode (by the net-list)
2864 * @cd: pointer to command details structure or NULL
2866 * Set LED value for the given port (0x06e9)
2869 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2870 struct ice_sq_cd *cd)
2872 struct ice_aqc_set_port_id_led *cmd;
2873 struct ice_hw *hw = pi->hw;
2874 struct ice_aq_desc desc;
2876 cmd = &desc.params.set_port_id_led;
2878 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2881 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2883 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2885 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2890 * @hw: pointer to the HW struct
2891 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
2892 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
2893 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
2895 * @set_page: set or ignore the page
2896 * @data: pointer to data buffer to be read/written to the I2C device.
2897 * @length: 1-16 for read, 1 for write.
2898 * @write: 0 for read, 1 for write.
2899 * @cd: pointer to command details structure or NULL
2901 * Read/Write SFF EEPROM (0x06EE)
2904 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
2905 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
2906 bool write, struct ice_sq_cd *cd)
2908 struct ice_aqc_sff_eeprom *cmd;
2909 struct ice_aq_desc desc;
2910 enum ice_status status;
2912 if (!data || (mem_addr & 0xff00))
2913 return ICE_ERR_PARAM;
2915 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
2916 cmd = &desc.params.read_write_sff_param;
2917 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
2918 cmd->lport_num = (u8)(lport & 0xff);
2919 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
2920 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
2921 ICE_AQC_SFF_I2CBUS_7BIT_M) |
2923 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
2924 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
2925 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
2926 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
2928 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
2930 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
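/* Usage sketch (illustrative only): read the first 16 bytes of page 0 of an
 * SFF module EEPROM at the default topology I2C address (0xA0), on logical
 * port 0 without the "lport valid" bit.
 *
 *	u8 buf[16];
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x00, 0, 0, buf,
 *				   sizeof(buf), false, NULL);
 */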
2935 * __ice_aq_get_set_rss_lut
2936 * @hw: pointer to the hardware structure
2937 * @vsi_id: VSI FW index
2938 * @lut_type: LUT table type
2939 * @lut: pointer to the LUT buffer provided by the caller
2940 * @lut_size: size of the LUT buffer
2941 * @glob_lut_idx: global LUT index
2942 * @set: set true to set the table, false to get the table
2944 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
2946 static enum ice_status
2947 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2948 u16 lut_size, u8 glob_lut_idx, bool set)
2950 struct ice_aqc_get_set_rss_lut *cmd_resp;
2951 struct ice_aq_desc desc;
2952 enum ice_status status;
2955 cmd_resp = &desc.params.get_set_rss_lut;
2958 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
2959 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2961 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
2964 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
2965 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2966 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2967 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
2970 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
2971 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
2972 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
2973 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
2974 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
2977 status = ICE_ERR_PARAM;
2978 goto ice_aq_get_set_rss_lut_exit;
2981 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
2982 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
2983 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
2986 goto ice_aq_get_set_rss_lut_send;
2987 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2989 goto ice_aq_get_set_rss_lut_send;
2991 goto ice_aq_get_set_rss_lut_send;
2994 /* LUT size is only valid for Global and PF table types */
2996 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
2997 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
2998 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2999 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3001 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3002 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3003 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3004 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3006 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3007 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3008 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3009 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3010 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3015 status = ICE_ERR_PARAM;
3016 goto ice_aq_get_set_rss_lut_exit;
3019 ice_aq_get_set_rss_lut_send:
3020 cmd_resp->flags = CPU_TO_LE16(flags);
3021 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3023 ice_aq_get_set_rss_lut_exit:
3028 * ice_aq_get_rss_lut
3029 * @hw: pointer to the hardware structure
3030 * @vsi_handle: software VSI handle
3031 * @lut_type: LUT table type
3032 * @lut: pointer to the LUT buffer provided by the caller
3033 * @lut_size: size of the LUT buffer
3035 * get the RSS lookup table, PF or VSI type
3038 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3039 u8 *lut, u16 lut_size)
3041 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3042 return ICE_ERR_PARAM;
3044 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3045 lut_type, lut, lut_size, 0, false);
3049 * ice_aq_set_rss_lut
3050 * @hw: pointer to the hardware structure
3051 * @vsi_handle: software VSI handle
3052 * @lut_type: LUT table type
3053 * @lut: pointer to the LUT buffer provided by the caller
3054 * @lut_size: size of the LUT buffer
3056 * set the RSS lookup table, PF or VSI type
3059 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3060 u8 *lut, u16 lut_size)
3062 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3063 return ICE_ERR_PARAM;
3065 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3066 lut_type, lut, lut_size, 0, true);
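/* Usage sketch (illustrative only): program a PF-type RSS lookup table for a
 * valid software VSI handle. lut is assumed to be a caller-filled array of
 * ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 queue indices.
 *
 *	status = ice_aq_set_rss_lut(hw, vsi_handle,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF, lut,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512);
 */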
3070 * __ice_aq_get_set_rss_key
3071 * @hw: pointer to the HW struct
3072 * @vsi_id: VSI FW index
3073 * @key: pointer to key info struct
3074 * @set: set true to set the key, false to get the key
3076 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3079 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3080 struct ice_aqc_get_set_rss_keys *key,
3083 struct ice_aqc_get_set_rss_key *cmd_resp;
3084 u16 key_size = sizeof(*key);
3085 struct ice_aq_desc desc;
3087 cmd_resp = &desc.params.get_set_rss_key;
3090 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3091 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3093 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3096 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3097 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3098 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3099 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3101 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3105 * ice_aq_get_rss_key
3106 * @hw: pointer to the HW struct
3107 * @vsi_handle: software VSI handle
3108 * @key: pointer to key info struct
3110 * get the RSS key per VSI
3113 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3114 struct ice_aqc_get_set_rss_keys *key)
3116 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3117 return ICE_ERR_PARAM;
3119 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3124 * ice_aq_set_rss_key
3125 * @hw: pointer to the HW struct
3126 * @vsi_handle: software VSI handle
3127 * @keys: pointer to key info struct
3129 * set the RSS key per VSI
3132 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3133 struct ice_aqc_get_set_rss_keys *keys)
3135 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3136 return ICE_ERR_PARAM;
3138 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3143 * ice_aq_add_lan_txq
3144 * @hw: pointer to the hardware structure
3145 * @num_qgrps: Number of added queue groups
3146 * @qg_list: list of queue groups to be added
3147 * @buf_size: size of buffer for indirect command
3148 * @cd: pointer to command details structure or NULL
3150 * Add Tx LAN queue (0x0C30)
3153 * Prior to calling add Tx LAN queue:
3154 * Initialize the following as part of the Tx queue context:
3155 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3156 * Cache profile and Packet shaper profile.
3158 * After add Tx LAN queue AQ command is completed:
3159 * Interrupts should be associated with specific queues,
3160 * Association of Tx queue to Doorbell queue is not part of the Add LAN Tx queue AQ command.
3164 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3165 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3166 struct ice_sq_cd *cd)
3168 u16 i, sum_header_size, sum_q_size = 0;
3169 struct ice_aqc_add_tx_qgrp *list;
3170 struct ice_aqc_add_txqs *cmd;
3171 struct ice_aq_desc desc;
3173 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3175 cmd = &desc.params.add_txqs;
3177 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3180 return ICE_ERR_PARAM;
3182 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3183 return ICE_ERR_PARAM;
3185 sum_header_size = num_qgrps *
3186 (sizeof(*qg_list) - sizeof(*qg_list->txqs));
3189 for (i = 0; i < num_qgrps; i++) {
3190 struct ice_aqc_add_txqs_perq *q = list->txqs;
3192 sum_q_size += list->num_txqs * sizeof(*q);
3193 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
3196 if (buf_size != (sum_header_size + sum_q_size))
3197 return ICE_ERR_PARAM;
3199 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3201 cmd->num_qgrps = num_qgrps;
3203 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3207 * ice_aq_dis_lan_txq
3208 * @hw: pointer to the hardware structure
3209 * @num_qgrps: number of groups in the list
3210 * @qg_list: the list of groups to disable
3211 * @buf_size: the total size of the qg_list buffer in bytes
3212 * @rst_src: if called due to reset, specifies the reset source
3213 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3214 * @cd: pointer to command details structure or NULL
3216 * Disable LAN Tx queue (0x0C31)
3218 static enum ice_status
3219 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3220 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3221 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3222 struct ice_sq_cd *cd)
3224 struct ice_aqc_dis_txqs *cmd;
3225 struct ice_aq_desc desc;
3226 enum ice_status status;
3229 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3230 cmd = &desc.params.dis_txqs;
3231 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3233 /* qg_list can be NULL only in VM/VF reset flow */
3234 if (!qg_list && !rst_src)
3235 return ICE_ERR_PARAM;
3237 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3238 return ICE_ERR_PARAM;
3240 cmd->num_entries = num_qgrps;
3242 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3243 ICE_AQC_Q_DIS_TIMEOUT_M);
3247 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3248 cmd->vmvf_and_timeout |=
3249 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3256 /* flush pipe on time out */
3257 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3258 /* If no queue group info, we are in a reset flow. Issue the AQ */
3262 /* set RD bit to indicate that command buffer is provided by the driver
3263 * and it needs to be read by the firmware
3265 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3267 for (i = 0; i < num_qgrps; ++i) {
3268 /* Calculate the size taken up by the queue IDs in this group */
3269 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
3271 /* Add the size of the group header */
3272 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
3274 /* If the num of queues is even, add 2 bytes of padding */
3275 if ((qg_list[i].num_qs % 2) == 0)
3280 return ICE_ERR_PARAM;
3283 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3286 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3287 vmvf_num, hw->adminq.sq_last_status);
3289 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3290 LE16_TO_CPU(qg_list[0].q_id[0]),
3291 hw->adminq.sq_last_status);
3297 * ice_aq_move_recfg_lan_txq
3298 * @hw: pointer to the hardware structure
3299 * @num_qs: number of queues to move/reconfigure
3300 * @is_move: true if this operation involves node movement
3301 * @is_tc_change: true if this operation involves a TC change
3302 * @subseq_call: true if this operation is a subsequent call
3303 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3304 * @timeout: timeout in units of 100 usec (valid values 0-50)
3305 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3306 * @buf: struct containing src/dest TEID and per-queue info
3307 * @buf_size: size of buffer for indirect command
3308 * @txqs_moved: out param, number of queues successfully moved
3309 * @cd: pointer to command details structure or NULL
3311 * Move / Reconfigure Tx LAN queues (0x0C32)
3314 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3315 bool is_tc_change, bool subseq_call, bool flush_pipe,
3316 u8 timeout, u32 *blocked_cgds,
3317 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3318 u8 *txqs_moved, struct ice_sq_cd *cd)
3320 struct ice_aqc_move_txqs *cmd;
3321 struct ice_aq_desc desc;
3322 enum ice_status status;
3324 cmd = &desc.params.move_txqs;
3325 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3327 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3328 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3329 return ICE_ERR_PARAM;
3331 if (is_tc_change && !flush_pipe && !blocked_cgds)
3332 return ICE_ERR_PARAM;
3334 if (!is_move && !is_tc_change)
3335 return ICE_ERR_PARAM;
3337 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3340 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3343 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3346 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3349 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3351 cmd->num_qs = num_qs;
3352 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3353 ICE_AQC_Q_CMD_TIMEOUT_M);
3355 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3357 if (!status && txqs_moved)
3358 *txqs_moved = cmd->num_qs;
3360 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3361 is_tc_change && !flush_pipe)
3362 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3367 /* End of FW Admin Queue command wrappers */
3370 * ice_write_byte - write a byte to a packed context structure
3371 * @src_ctx: the context structure to read from
3372 * @dest_ctx: the context to be written to
3373 * @ce_info: a description of the struct to be filled
3376 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3378 u8 src_byte, dest_byte, mask;
3382 /* copy from the next struct field */
3383 from = src_ctx + ce_info->offset;
3385 /* prepare the bits and mask */
3386 shift_width = ce_info->lsb % 8;
3387 mask = (u8)(BIT(ce_info->width) - 1);
3392 /* shift to correct alignment */
3393 mask <<= shift_width;
3394 src_byte <<= shift_width;
3396 /* get the current bits from the target bit string */
3397 dest = dest_ctx + (ce_info->lsb / 8);
3399 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3401 dest_byte &= ~mask; /* get the bits not changing */
3402 dest_byte |= src_byte; /* add in the new bits */
3404 /* put it all back */
3405 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3409 * ice_write_word - write a word to a packed context structure
3410 * @src_ctx: the context structure to read from
3411 * @dest_ctx: the context to be written to
3412 * @ce_info: a description of the struct to be filled
3415 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3422 /* copy from the next struct field */
3423 from = src_ctx + ce_info->offset;
3425 /* prepare the bits and mask */
3426 shift_width = ce_info->lsb % 8;
3427 mask = BIT(ce_info->width) - 1;
3429 /* don't swizzle the bits until after the mask because the mask bits
3430 * will be in a different bit position on big endian machines
3432 src_word = *(u16 *)from;
3435 /* shift to correct alignment */
3436 mask <<= shift_width;
3437 src_word <<= shift_width;
3439 /* get the current bits from the target bit string */
3440 dest = dest_ctx + (ce_info->lsb / 8);
3442 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3444 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
3445 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
3447 /* put it all back */
3448 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3452 * ice_write_dword - write a dword to a packed context structure
3453 * @src_ctx: the context structure to read from
3454 * @dest_ctx: the context to be written to
3455 * @ce_info: a description of the struct to be filled
3458 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3460 u32 src_dword, mask;
3465 /* copy from the next struct field */
3466 from = src_ctx + ce_info->offset;
3468 /* prepare the bits and mask */
3469 shift_width = ce_info->lsb % 8;
3471 /* if the field width is exactly 32 on an x86 machine, then the shift
3472 * operation will not work because the SHL instruction's count is masked
3473 * to 5 bits so the shift will do nothing
3475 if (ce_info->width < 32)
3476 mask = BIT(ce_info->width) - 1;
3480 /* don't swizzle the bits until after the mask because the mask bits
3481 * will be in a different bit position on big endian machines
3483 src_dword = *(u32 *)from;
3486 /* shift to correct alignment */
3487 mask <<= shift_width;
3488 src_dword <<= shift_width;
3490 /* get the current bits from the target bit string */
3491 dest = dest_ctx + (ce_info->lsb / 8);
3493 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3495 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
3496 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
3498 /* put it all back */
3499 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3503 * ice_write_qword - write a qword to a packed context structure
3504 * @src_ctx: the context structure to read from
3505 * @dest_ctx: the context to be written to
3506 * @ce_info: a description of the struct to be filled
3509 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3511 u64 src_qword, mask;
3516 /* copy from the next struct field */
3517 from = src_ctx + ce_info->offset;
3519 /* prepare the bits and mask */
3520 shift_width = ce_info->lsb % 8;
3522 /* if the field width is exactly 64 on an x86 machine, then the shift
3523 * operation will not work because the SHL instruction's count is masked
3524 * to 6 bits so the shift will do nothing
3526 if (ce_info->width < 64)
3527 mask = BIT_ULL(ce_info->width) - 1;
3531 /* don't swizzle the bits until after the mask because the mask bits
3532 * will be in a different bit position on big endian machines
3534 src_qword = *(u64 *)from;
3537 /* shift to correct alignment */
3538 mask <<= shift_width;
3539 src_qword <<= shift_width;
3541 /* get the current bits from the target bit string */
3542 dest = dest_ctx + (ce_info->lsb / 8);
3544 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
3546 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
3547 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
3549 /* put it all back */
3550 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3554 * ice_set_ctx - set context bits in packed structure
3555 * @src_ctx: pointer to a generic non-packed context structure
3556 * @dest_ctx: pointer to memory for the packed structure
3557 * @ce_info: a description of the structure to be transformed
3560 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3564 for (f = 0; ce_info[f].width; f++) {
3565 /* We have to deal with each element of the FW response
3566 * using the correct size so that we are correct regardless
3567 * of the endianness of the machine.
3569 switch (ce_info[f].size_of) {
3571 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3574 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3577 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3580 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3583 return ICE_ERR_INVAL_SIZE;
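/* Usage sketch (illustrative only): packing a made-up two-field context. The
 * layout below is hypothetical; real ice_ctx_ele tables (e.g. for the Tx
 * queue context) are defined elsewhere in the driver. The table is walked
 * until an entry with width 0 is reached.
 *
 *	struct my_ctx { u16 base; u8 ena; };        (hypothetical)
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		{ .offset = offsetof(struct my_ctx, base),
 *		  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *		{ .offset = offsetof(struct my_ctx, ena),
 *		  .size_of = sizeof(u8), .width = 1, .lsb = 13 },
 *		{ 0 },                              (terminator)
 *	};
 *
 *	ice_set_ctx((u8 *)&my_ctx, packed_buf, my_ctx_info);
 */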
3591 * ice_read_byte - read context byte into struct
3592 * @src_ctx: the context structure to read from
3593 * @dest_ctx: the context to be written to
3594 * @ce_info: a description of the struct to be filled
3597 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3603 /* prepare the bits and mask */
3604 shift_width = ce_info->lsb % 8;
3605 mask = (u8)(BIT(ce_info->width) - 1);
3607 /* shift to correct alignment */
3608 mask <<= shift_width;
3610 /* get the current bits from the src bit string */
3611 src = src_ctx + (ce_info->lsb / 8);
3613 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3615 dest_byte &= ~(mask);
3617 dest_byte >>= shift_width;
3619 /* get the address from the struct field */
3620 target = dest_ctx + ce_info->offset;
3622 /* put it back in the struct */
3623 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3627 * ice_read_word - read context word into struct
3628 * @src_ctx: the context structure to read from
3629 * @dest_ctx: the context to be written to
3630 * @ce_info: a description of the struct to be filled
3633 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3635 u16 dest_word, mask;
3640 /* prepare the bits and mask */
3641 shift_width = ce_info->lsb % 8;
3642 mask = BIT(ce_info->width) - 1;
3644 /* shift to correct alignment */
3645 mask <<= shift_width;
3647 /* get the current bits from the src bit string */
3648 src = src_ctx + (ce_info->lsb / 8);
3650 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
3652 /* the data in the memory is stored as little endian so mask it correctly
3655 src_word &= ~(CPU_TO_LE16(mask));
3657 /* get the data back into host order before shifting */
3658 dest_word = LE16_TO_CPU(src_word);
3660 dest_word >>= shift_width;
3662 /* get the address from the struct field */
3663 target = dest_ctx + ce_info->offset;
3665 /* put it back in the struct */
3666 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3670 * ice_read_dword - read context dword into struct
3671 * @src_ctx: the context structure to read from
3672 * @dest_ctx: the context to be written to
3673 * @ce_info: a description of the struct to be filled
3676 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3678 u32 dest_dword, mask;
3683 /* prepare the bits and mask */
3684 shift_width = ce_info->lsb % 8;
3686 /* if the field width is exactly 32 on an x86 machine, then the shift
3687 * operation will not work because the SHL instruction's count is masked
3688 * to 5 bits so the shift will do nothing
3690 if (ce_info->width < 32)
3691 mask = BIT(ce_info->width) - 1;
3695 /* shift to correct alignment */
3696 mask <<= shift_width;
3698 /* get the current bits from the src bit string */
3699 src = src_ctx + (ce_info->lsb / 8);
3701 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
3703 /* the data in the memory is stored as little endian so mask it correctly
3706 src_dword &= ~(CPU_TO_LE32(mask));
3708 /* get the data back into host order before shifting */
3709 dest_dword = LE32_TO_CPU(src_dword);
3711 dest_dword >>= shift_width;
3713 /* get the address from the struct field */
3714 target = dest_ctx + ce_info->offset;
3716 /* put it back in the struct */
3717 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3721 * ice_read_qword - read context qword into struct
3722 * @src_ctx: the context structure to read from
3723 * @dest_ctx: the context to be written to
3724 * @ce_info: a description of the struct to be filled
3727 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3729 u64 dest_qword, mask;
3734 /* prepare the bits and mask */
3735 shift_width = ce_info->lsb % 8;
3737 /* if the field width is exactly 64 on an x86 machine, then the shift
3738 * operation will not work because the SHL instruction's count is masked
3739 * to 6 bits so the shift will do nothing
3741 if (ce_info->width < 64)
3742 mask = BIT_ULL(ce_info->width) - 1;
3746 /* shift to correct alignment */
3747 mask <<= shift_width;
3749 /* get the current bits from the src bit string */
3750 src = src_ctx + (ce_info->lsb / 8);
3752 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
3754 /* the data in the memory is stored as little endian so mask it correctly
3757 src_qword &= ~(CPU_TO_LE64(mask));
3759 /* get the data back into host order before shifting */
3760 dest_qword = LE64_TO_CPU(src_qword);
3762 dest_qword >>= shift_width;
3764 /* get the address from the struct field */
3765 target = dest_ctx + ce_info->offset;
3767 /* put it back in the struct */
3768 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3772 * ice_get_ctx - extract context bits from a packed structure
3773 * @src_ctx: pointer to a generic packed context structure
3774 * @dest_ctx: pointer to a generic non-packed context structure
3775 * @ce_info: a description of the structure to be read from
3778 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3782 for (f = 0; ce_info[f].width; f++) {
3783 switch (ce_info[f].size_of) {
3785 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
3788 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
3791 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
3794 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
3797 /* nothing to do, just keep going */
3806 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3807 * @hw: pointer to the HW struct
3808 * @vsi_handle: software VSI handle
3810 * @q_handle: software queue handle
3813 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3815 struct ice_vsi_ctx *vsi;
3816 struct ice_q_ctx *q_ctx;
3818 vsi = ice_get_vsi_ctx(hw, vsi_handle);
3821 if (q_handle >= vsi->num_lan_q_entries[tc])
3823 if (!vsi->lan_q_ctx[tc])
3825 q_ctx = vsi->lan_q_ctx[tc];
3826 return &q_ctx[q_handle];
3831 * @pi: port information structure
3832 * @vsi_handle: software VSI handle
3834 * @q_handle: software queue handle
3835 * @num_qgrps: Number of added queue groups
3836 * @buf: list of queue groups to be added
3837 * @buf_size: size of buffer for indirect command
3838 * @cd: pointer to command details structure or NULL
3840 * This function adds one LAN queue
3843 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3844 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3845 struct ice_sq_cd *cd)
3847 struct ice_aqc_txsched_elem_data node = { 0 };
3848 struct ice_sched_node *parent;
3849 struct ice_q_ctx *q_ctx;
3850 enum ice_status status;
3853 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3856 if (num_qgrps > 1 || buf->num_txqs > 1)
3857 return ICE_ERR_MAX_LIMIT;
3861 if (!ice_is_vsi_valid(hw, vsi_handle))
3862 return ICE_ERR_PARAM;
3864 ice_acquire_lock(&pi->sched_lock);
3866 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3868 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3870 status = ICE_ERR_PARAM;
3874 /* find a parent node */
3875 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3876 ICE_SCHED_NODE_OWNER_LAN);
3878 status = ICE_ERR_PARAM;
3882 buf->parent_teid = parent->info.node_teid;
3883 node.parent_teid = parent->info.node_teid;
3884 /* Mark the values in the "generic" section as valid. The default
3885 * value in the "generic" section is zero. This means that :
3886 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3887 * - 0 priority among siblings, indicated by Bit 1-3.
3888 * - WFQ, indicated by Bit 4.
3889 * - 0 Adjustment value is used in PSM credit update flow, indicated by
3891 * - Bit 7 is reserved.
3892 * Without setting the generic section as valid in valid_sections, the
3893 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3895 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3897 /* add the LAN queue */
3898 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3899 if (status != ICE_SUCCESS) {
3900 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3901 LE16_TO_CPU(buf->txqs[0].txq_id),
3902 hw->adminq.sq_last_status);
3906 node.node_teid = buf->txqs[0].q_teid;
3907 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3908 q_ctx->q_handle = q_handle;
3909 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
3911 /* add a leaf node into scheduler tree queue layer */
3912 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3914 status = ice_sched_replay_q_bw(pi, q_ctx);
3917 ice_release_lock(&pi->sched_lock);
3923 * @pi: port information structure
3924 * @vsi_handle: software VSI handle
3926 * @num_queues: number of queues
3927 * @q_handles: pointer to software queue handle array
3928 * @q_ids: pointer to the q_id array
3929 * @q_teids: pointer to queue node teids
3930 * @rst_src: if called due to reset, specifies the reset source
3931 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3932 * @cd: pointer to command details structure or NULL
3934 * This function removes queues and their corresponding nodes in SW DB
3937 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3938 u16 *q_handles, u16 *q_ids, u32 *q_teids,
3939 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3940 struct ice_sq_cd *cd)
3942 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3943 struct ice_aqc_dis_txq_item qg_list;
3944 struct ice_q_ctx *q_ctx;
3947 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3951 /* If the queue is already disabled but the disable queue command
3952 * still has to be sent to complete the VF reset, then call
3953 * ice_aq_dis_lan_txq without any queue information
3956 return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
3961 ice_acquire_lock(&pi->sched_lock);
3963 for (i = 0; i < num_queues; i++) {
3964 struct ice_sched_node *node;
3966 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
3969 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
3971 ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
3975 if (q_ctx->q_handle != q_handles[i]) {
3976 ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
3977 q_ctx->q_handle, q_handles[i]);
3980 qg_list.parent_teid = node->info.parent_teid;
3982 qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
3983 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
3984 sizeof(qg_list), rst_src, vmvf_num,
3987 if (status != ICE_SUCCESS)
3989 ice_free_sched_node(pi, node);
3990 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
3992 ice_release_lock(&pi->sched_lock);
3997 * ice_cfg_vsi_qs - configure the new/existing VSI queues
3998 * @pi: port information structure
3999 * @vsi_handle: software VSI handle
4000 * @tc_bitmap: TC bitmap
4001 * @maxqs: max queues array per TC
4002 * @owner: LAN or RDMA
4004 * This function adds/updates the VSI queues per TC.
4006 static enum ice_status
4007 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4008 u16 *maxqs, u8 owner)
4010 enum ice_status status = ICE_SUCCESS;
4013 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4016 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4017 return ICE_ERR_PARAM;
4019 ice_acquire_lock(&pi->sched_lock);
4021 ice_for_each_traffic_class(i) {
4022 /* configuration is possible only if TC node is present */
4023 if (!ice_sched_get_tc_node(pi, i))
4026 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4027 ice_is_tc_ena(tc_bitmap, i));
4032 ice_release_lock(&pi->sched_lock);
/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

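/* Illustrative usage sketch (hypothetical): configuring LAN queues for a VSI
 * that only carries traffic on TC 0. The helper name, the BIT(0) TC bitmap
 * and the queue count parameter are assumptions for the example; the #if 0
 * guard keeps it out of the build.
 */
#if 0
static enum ice_status example_cfg_lan_tc0(struct ice_port_info *pi,
					   u16 vsi_handle, u16 num_txq)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };

	/* only TC 0 is enabled, so only its entry carries a queue count */
	max_lanqs[0] = num_txq;
	return ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
}
#endif
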
/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return ice_sched_replay_tc_node_bw(hw->port_info);
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

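/* Illustrative usage sketch (hypothetical): the replay sequence a PF driver
 * might run after a reset - the main VSI first (which triggers
 * ice_replay_pre_init()), then the remaining VSIs, then ice_replay_post()
 * for cleanup. The vsi_handles array and count are assumptions for the
 * example; the #if 0 guard keeps it out of the build.
 */
#if 0
static void example_replay_after_reset(struct ice_hw *hw, u16 *vsi_handles,
				       u16 num_vsi)
{
	u16 i;

	if (ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE) != ICE_SUCCESS)
		return;
	for (i = 0; i < num_vsi; i++)
		if (vsi_handles[i] != ICE_MAIN_VSI_HANDLE)
			(void)ice_replay_vsi(hw, vsi_handles[i]);
	ice_replay_post(hw);
}
#endif
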
/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

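/* Worked example (hypothetical values) for the 40-bit roll-over case above:
 * if *prev_stat was 0xFFFFFFFFF0 and the counter has since wrapped so the
 * register now reads 0x10, the delta added to *cur_stat is
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, rather than a huge bogus value
 * from an unsigned underflow. The snippet below mirrors a typical polling
 * loop; the #if 0 guard keeps it out of the build.
 */
#if 0
static void example_poll_40bit_stat(struct ice_hw *hw, u32 reg, bool *loaded,
				    u64 *prev, u64 *total)
{
	/* first call seeds *prev; later calls accumulate deltas into *total */
	ice_stat_update40(hw, reg, *loaded, prev, total);
	*loaded = true;
}
#endif
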
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
 * @hw: ptr to the hardware info
 * @vsi_handle: VSI handle
 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
 * @cur_stats: ptr to current stats structure
 *
 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
 * thus cannot be read using the normal ice_stat_update32 function.
 *
 * Read the GLV_REPC register associated with the given VSI, and update the
 * rx_no_desc and rx_error values in the ice_eth_stats structure.
 *
 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
 * cleared each time it's read.
 *
 * Note that the GLV_RDPC register also counts the causes that would trigger
 * GLV_REPC. However, it does not give the finer grained detail about why the
 * packets are being dropped. The GLV_REPC values can be used to distinguish
 * whether Rx packets are dropped due to errors or due to no available
 * descriptors.
 */
void
ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
		     struct ice_eth_stats *cur_stats)
{
	u16 vsi_num, no_desc, error_cnt;
	u32 repc;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return;

	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	/* If we haven't loaded stats yet, just clear the current value */
	if (!prev_stat_loaded) {
		wr32(hw, GLV_REPC(vsi_num), 0);
		return;
	}

	repc = rd32(hw, GLV_REPC(vsi_num));
	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;

	/* Clear the count by writing to the stats register */
	wr32(hw, GLV_REPC(vsi_num), 0);

	cur_stats->rx_no_desc += no_desc;
	cur_stats->rx_errors += error_cnt;
}

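/* Illustrative usage sketch (hypothetical): GLV_REPC packs a "no descriptor"
 * count and an "error" count into one 32-bit register, so it goes through
 * ice_stat_update_repc() while ordinary counters such as GLV_RDPC use
 * ice_stat_update32(). The helper name and the prev/cur stats layout are
 * assumptions for the example; the #if 0 guard keeps it out of the build.
 */
#if 0
static void example_update_rx_drop_stats(struct ice_hw *hw, u16 vsi_handle,
					 u16 vsi_num, bool loaded,
					 struct ice_eth_stats *prev,
					 struct ice_eth_stats *cur)
{
	/* plain 32-bit discard counter */
	ice_stat_update32(hw, GLV_RDPC(vsi_num), loaded,
			  &prev->rx_discards, &cur->rx_discards);
	/* packed no-descriptor/error counters; also clears the sticky reg */
	ice_stat_update_repc(hw, vsi_handle, loaded, cur);
}
#endif
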
/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
	buf->generic[0].node_teid = CPU_TO_LE32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status != ICE_SUCCESS || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

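/* Illustrative usage sketch (hypothetical): querying a single scheduler node
 * by TEID and extracting its element type from the response. The buffer
 * layout (struct ice_aqc_get_elem with a generic[] element array) follows
 * the usage in ice_sched_query_elem() above; the helper name is an
 * assumption and the #if 0 guard keeps it out of the build.
 */
#if 0
static enum ice_status example_query_node_type(struct ice_hw *hw, u32 teid,
					       u8 *elem_type)
{
	struct ice_aqc_get_elem buf;
	enum ice_status status;

	status = ice_sched_query_elem(hw, teid, &buf);
	if (status == ICE_SUCCESS)
		*elem_type = buf.generic[0].data.elem_type;
	return status;
}
#endif
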
/**
 * ice_get_fw_mode - returns FW mode
 * @hw: pointer to the HW struct
 */
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
{
#define ICE_FW_MODE_DBG_M BIT(0)
#define ICE_FW_MODE_REC_M BIT(1)
#define ICE_FW_MODE_ROLLBACK_M BIT(2)
	u32 fw_mode;

	/* check the current FW mode */
	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;

	if (fw_mode & ICE_FW_MODE_DBG_M)
		return ICE_FW_MODE_DBG;
	else if (fw_mode & ICE_FW_MODE_REC_M)
		return ICE_FW_MODE_REC;
	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
		return ICE_FW_MODE_ROLLBACK;
	else
		return ICE_FW_MODE_NORMAL;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}

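/* Illustrative usage sketch (hypothetical): callers are expected to check
 * ice_fw_supports_link_override() before reading the override TLV with
 * ice_get_link_default_override() below. The helper name and where the TLV
 * is stored are assumptions for the example; the #if 0 guard keeps it out
 * of the build.
 */
#if 0
static void example_load_link_override(struct ice_port_info *pi,
				       struct ice_link_default_override_tlv *ldo)
{
	if (!ice_fw_supports_link_override(pi->hw))
		return;
	if (ice_get_link_default_override(ldo, pi) != ICE_SUCCESS)
		ice_debug(pi->hw, ICE_DBG_INIT,
			  "link default override TLV not read\n");
}
#endif
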
/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));