/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT 300
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_aq_discover_caps is expected to be called before this function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_DMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_DMA_TO_NONDMA);
			break;
		}

	return ICE_SUCCESS;
}
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
	}

	return status;
}
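/* Usage sketch (illustrative, not part of the driver): querying PHY
 * capabilities into a caller-owned buffer. Only the TOPO_CAP report mode
 * caches phy_type_low/high back into pi->phy, as seen above.
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *	enum ice_status status;
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
 *				     &pcaps, NULL);
 *	if (!status)
 *		ice_debug(pi->hw, ICE_DBG_PHY, "phy_type_low: 0x%llx\n",
 *			  (unsigned long long)LE64_TO_CPU(pcaps.phy_type_low));
 */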
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present
	 * then connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}
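/* Usage sketch (illustrative, not part of the driver): refreshing the
 * cached link state while enabling link status events; passing a non-NULL
 * link pointer also copies the new status out to the caller.
 * my_report_speed() is a placeholder for caller-side handling.
 *
 *	struct ice_link_status link = { 0 };
 *
 *	if (!ice_aq_get_link_info(pi, true, &link, NULL) &&
 *	    (link.link_info & ICE_AQ_LINK_UP))
 *		my_report_speed(link.link_speed);
 */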
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and fc threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and fc
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}
/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
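/* Usage sketch (illustrative): ice_init_hw() below calls this wrapper with
 * ICE_AQ_SET_MAC_FRAME_SIZE_MAX to enable jumbo frames; a caller could
 * likewise clamp the MAC to an MTU-derived frame size:
 *
 *	status = ice_aq_set_mac_cfg(hw, mtu + ICE_ETH_OVERHEAD, NULL);
 *
 * where ICE_ETH_OVERHEAD (L2 header accounting) is an assumption of this
 * sketch, not a symbol defined in this file.
 */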
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = (struct ice_switch_info *)
		ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
	if (status) {
		ice_free(hw, hw->switch_info);
		return status;
	}
	return ICE_SUCCESS;
}
/**
 * ice_cleanup_fltr_mgmt_single - clears single filter mngt struct
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	if (!sw)
		return;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_sw_replay_rule_info(hw, sw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}
/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_nvm_info *nvm = &hw->nvm;
	struct ice_orom_info *orom;

	orom = &nvm->orom;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
			ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);
	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
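/* Usage sketch (illustrative, not part of the driver): ice_init_hw() above
 * issues a PF reset this way during bring-up; core/global resets follow the
 * same call but also restore PXE mode, as noted in the function comment.
 *
 *	if (ice_reset(hw, ICE_RESET_PFR))
 *		return ICE_ERR_RESET_FAILED;
 */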
/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}
/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field Width LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
	ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
	{ 0 }
};
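/* The (width, LSB) pairs above describe how ice_set_ctx() packs each sparse
 * struct field into the dense context image. For example, qlen is 13 bits
 * wide at bit offset 89, so it occupies bits 89..101 of the image and
 * straddles bytes 11 and 12 (89 = 11 * 8 + 1). Fields are not byte aligned,
 * which is why the dense image is built in software before being copied to
 * the QRX_CONTEXT registers a dword at a time.
 */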
/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
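/* Usage sketch (illustrative, not part of the driver): minimal Rx queue
 * context programming. The values and variables are placeholders; base and
 * dbuf are expressed in 128-byte units, hence the >> 7 shifts.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_phys_addr >> 7;
 *	rlan_ctx.qlen = ring_size;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;
 *	rlan_ctx.rxmax = max_frame_size;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */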
/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field Width LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
	{ 0 }
};
/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}
/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
	/* Field Width LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
	{ 0 }
};
/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}
/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}
/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}
/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
	/* Field Width LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
	{ 0 }
};
/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
		    ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}
/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}
/* FW Admin Queue command wrappers */
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	if (hw->aq_send_cmd_fn) {
		enum ice_status status = ICE_ERR_NOT_READY;
		u16 retval = ICE_AQ_RC_OK;

		ice_acquire_lock(&hw->adminq.sq_lock);
		if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc,
					buf, buf_size)) {
			retval = LE16_TO_CPU(desc->retval);
			/* strip off FW internal code */
			if (retval)
				retval &= 0xff;
			if (retval == ICE_AQ_RC_OK)
				status = ICE_SUCCESS;
			else
				status = ICE_ERR_AQ_ERROR;
		}

		hw->adminq.sq_last_status = (enum ice_aq_err)retval;
		ice_release_lock(&hw->adminq.sq_lock);

		return status;
	}
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}
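/* Usage sketch (illustrative): every AQ wrapper in this file follows the
 * same pattern; a direct (no-buffer) command takes three steps:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 *
 * Indirect commands pass a buffer and its size instead of NULL/0, and set
 * ICE_AQ_FLAG_RD in desc.flags when the buffer carries data to the firmware
 * (see ice_aq_send_driver_ver() below).
 */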
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = LE32_TO_CPU(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}
/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS - acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *    successfully downloaded the package; the driver does
 *    not have to download the package and can continue
 *    loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = CPU_TO_LE16(res);
	cmd_resp->access_type = CPU_TO_LE16(access);
	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
	cmd_resp->timeout = CPU_TO_LE32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_SUCCESS;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = LE32_TO_CPU(cmd_resp->timeout);

	return status;
}
/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = CPU_TO_LE16(res);
	cmd->res_number = CPU_TO_LE32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		ice_msec_delay(delay, true);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			goto ice_acquire_res_exit;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		ice_msec_delay(1, true);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
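/* Usage sketch (illustrative, not part of the driver): the
 * acquire/perform/release pattern for an ownable resource such as the NVM.
 * ICE_NVM_RES_ID, ICE_RES_READ and ICE_NVM_TIMEOUT are driver symbols
 * defined elsewhere; my_read_nvm() is a placeholder.
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *				 ICE_NVM_TIMEOUT);
 *	if (!status) {
 *		my_read_nvm(hw);
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 */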
/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_entries = CPU_TO_LE16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = ice_struct_size(buf, elem, num - 1);
	buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = CPU_TO_LE16(num);
	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
		   ICE_NONDMA_TO_NONDMA);

ice_alloc_res_exit:
	ice_free(hw, buf);
	return status;
}
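/* Usage sketch (illustrative): allocating one resource entry from the bottom
 * of the resource space; res must have room for num entries on success.
 * ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK is a resource type from
 * ice_adminq_cmd.h and stands in for any valid type here.
 *
 *	u16 ctr_id;
 *
 *	status = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
 *				  1, true, &ctr_id);
 */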
/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = ice_struct_size(buf, elem, num - 1);
	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = CPU_TO_LE16(num);
	buf->res_type = CPU_TO_LE16(type);
	ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
		   ICE_NONDMA_TO_NONDMA);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	ice_free(hw, buf);
	return status;
}
/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M 0xFF
	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
			     ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
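/* Worked example: with a valid_functions bitmap of 0x0F (four PFs set),
 * ice_get_num_per_func(hw, ICE_MAX_VSI) returns ICE_MAX_VSI / 4, which is
 * how guar_num_vsi is derived in ice_parse_caps() below.
 */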
/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	char const *prefix;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
		prefix = "dev cap";
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
		prefix = "func cap";
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = LE32_TO_CPU(cap_resp->logical_id);
		u32 phys_id = LE32_TO_CPU(cap_resp->phys_id);
		u32 number = LE32_TO_CPU(cap_resp->number);
		u16 cap = LE16_TO_CPU(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			caps->valid_functions = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: valid_functions (bitmap) = %d\n", prefix,
				  caps->valid_functions);

			/* store func count for resource management purposes */
			if (dev_p)
				dev_p->num_funcs = ice_hweight32(number);
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vsi_allocd_to_host = %d\n",
					  prefix,
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guar_num_vsi =
					ice_get_num_per_func(hw, ICE_MAX_VSI);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi (fw) = %d\n",
					  prefix, number);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi = %d\n",
					  prefix, func_p->guar_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_DCB:
			caps->dcb = (number == 1);
			caps->active_tc_bitmap = logical_id;
			caps->maxtc = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: dcb = %d\n", prefix, caps->dcb);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: active_tc_bitmap = %d\n", prefix,
				  caps->active_tc_bitmap);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: maxtc = %d\n", prefix, caps->maxtc);
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_size = %d\n", prefix,
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_entry_width = %d\n", prefix,
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_rxq = %d\n", prefix,
				  caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rxq_first_id = %d\n", prefix,
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_txq = %d\n", prefix,
				  caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: txq_first_id = %d\n", prefix,
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_msix_vectors = %d\n", prefix,
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: msix_vector_first_id = %d\n", prefix,
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_FD:
			if (dev_p) {
				dev_p->num_flow_director_fltr = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_flow_director_fltr = %d\n",
					  prefix,
					  dev_p->num_flow_director_fltr);
			}
			if (func_p) {
				u32 reg_val, val;

				if (hw->dcf_enabled)
					break;
				reg_val = rd32(hw, GLQF_FD_SIZE);
				val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
				      GLQF_FD_SIZE_FD_GSIZE_S;
				func_p->fd_fltr_guar =
					ice_get_num_per_func(hw, val);
				val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
				      GLQF_FD_SIZE_FD_BSIZE_S;
				func_p->fd_fltr_best_effort = val;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: fd_fltr_guar = %d\n",
					  prefix, func_p->fd_fltr_guar);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: fd_fltr_best_effort = %d\n",
					  prefix, func_p->fd_fltr_best_effort);
			}
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
				  prefix, caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: unknown capability[%d]: 0x%x\n", prefix,
				  i, cap);
			break;
		}
	}

	/* Re-calculate capabilities that are dependent on the number of
	 * physical ports; i.e. some features are not supported or function
	 * differently on devices with more than 4 ports.
	 */
	if (hw->dev_caps.num_funcs > 4) {
		/* Max 4 TCs per port */
		caps->maxtc = 4;
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: maxtc = %d (based on #ports)\n", prefix,
			  caps->maxtc);
	}
}
/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @cap_count: cap count needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, LE32_TO_CPU(cmd->count), opc);
	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
		*cap_count = LE32_TO_CPU(cmd->count);
	return status;
}
1984 * ice_discover_caps - get info about the HW
1985 * @hw: pointer to the hardware structure
1986 * @opc: capabilities type to discover - pass in the command opcode
1988 static enum ice_status
1989 ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
1991 enum ice_status status;
1996 /* The driver doesn't know how many capabilities the device will return
1997 * so the buffer size required isn't known ahead of time. The driver
1998 * starts with cbuf_len and if this turns out to be insufficient, the
1999 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
2000 * The driver then allocates the buffer based on the count and retries
2001 * the operation. So it follows that the retry count is 2.
2003 #define ICE_GET_CAP_BUF_COUNT 40
2004 #define ICE_GET_CAP_RETRY_COUNT 2
2006 cap_count = ICE_GET_CAP_BUF_COUNT;
2007 retries = ICE_GET_CAP_RETRY_COUNT;
2012 cbuf_len = (u16)(cap_count *
2013 sizeof(struct ice_aqc_list_caps_elem));
		cbuf = ice_malloc(hw, cbuf_len);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;
		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
					      opc, NULL);
		ice_free(hw, cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;
2025 /* If ENOMEM is returned, try again with bigger buffer */
2026 } while (--retries);
2032 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2033 * @hw: pointer to the hardware structure
2035 void ice_set_safe_mode_caps(struct ice_hw *hw)
2037 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2038 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2039 u32 valid_func, rxq_first_id, txq_first_id;
2040 u32 msix_vector_first_id, max_mtu;
2043 /* cache some func_caps values that should be restored after memset */
2044 valid_func = func_caps->common_cap.valid_functions;
2045 txq_first_id = func_caps->common_cap.txq_first_id;
2046 rxq_first_id = func_caps->common_cap.rxq_first_id;
2047 msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
2048 max_mtu = func_caps->common_cap.max_mtu;
2050 /* unset func capabilities */
2051 memset(func_caps, 0, sizeof(*func_caps));
2053 /* restore cached values */
2054 func_caps->common_cap.valid_functions = valid_func;
2055 func_caps->common_cap.txq_first_id = txq_first_id;
2056 func_caps->common_cap.rxq_first_id = rxq_first_id;
2057 func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2058 func_caps->common_cap.max_mtu = max_mtu;
2060 /* one Tx and one Rx queue in safe mode */
2061 func_caps->common_cap.num_rxq = 1;
2062 func_caps->common_cap.num_txq = 1;
2064 /* two MSIX vectors, one for traffic and one for misc causes */
2065 func_caps->common_cap.num_msix_vectors = 2;
2066 func_caps->guar_num_vsi = 1;
2068 /* cache some dev_caps values that should be restored after memset */
2069 valid_func = dev_caps->common_cap.valid_functions;
2070 txq_first_id = dev_caps->common_cap.txq_first_id;
2071 rxq_first_id = dev_caps->common_cap.rxq_first_id;
2072 msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
2073 max_mtu = dev_caps->common_cap.max_mtu;
2074 num_funcs = dev_caps->num_funcs;
2076 /* unset dev capabilities */
2077 memset(dev_caps, 0, sizeof(*dev_caps));
2079 /* restore cached values */
2080 dev_caps->common_cap.valid_functions = valid_func;
2081 dev_caps->common_cap.txq_first_id = txq_first_id;
2082 dev_caps->common_cap.rxq_first_id = rxq_first_id;
2083 dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2084 dev_caps->common_cap.max_mtu = max_mtu;
2085 dev_caps->num_funcs = num_funcs;
2087 /* one Tx and one Rx queue per function in safe mode */
2088 dev_caps->common_cap.num_rxq = num_funcs;
2089 dev_caps->common_cap.num_txq = num_funcs;
2091 /* two MSIX vectors per function */
2092 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2096 * ice_get_caps - get info about the HW
2097 * @hw: pointer to the hardware structure
2099 enum ice_status ice_get_caps(struct ice_hw *hw)
2101 enum ice_status status;
	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
	if (!status)
		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);

	return status;
}
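
/* Illustrative usage sketch (not part of the driver): once ice_get_caps()
 * succeeds, the parsed capabilities are available in hw->dev_caps and
 * hw->func_caps. The helper name below is hypothetical.
 */
static void ice_example_log_caps(struct ice_hw *hw)
{
	if (ice_get_caps(hw) != ICE_SUCCESS)
		return;

	ice_debug(hw, ICE_DBG_INIT, "num_funcs = %d, func num_txq = %d\n",
		  hw->dev_caps.num_funcs, hw->func_caps.common_cap.num_txq);
}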
2111 * ice_aq_manage_mac_write - manage MAC address write command
2112 * @hw: pointer to the HW struct
2113 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2114 * @flags: flags to control write behavior
2115 * @cd: pointer to command details structure or NULL
2117 * This function is used to write MAC address to the NVM (0x0108).
2120 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2121 struct ice_sq_cd *cd)
2123 struct ice_aqc_manage_mac_write *cmd;
2124 struct ice_aq_desc desc;
2126 cmd = &desc.params.mac_write;
2127 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
	cmd->flags = flags;
	ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA);
2132 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2136 * ice_aq_clear_pxe_mode
2137 * @hw: pointer to the HW struct
2139 * Tell the firmware that the driver is taking over from PXE (0x0110).
2141 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2143 struct ice_aq_desc desc;
2145 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2146 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2148 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2152 * ice_clear_pxe_mode - clear pxe operations mode
2153 * @hw: pointer to the HW struct
2155 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
2158 void ice_clear_pxe_mode(struct ice_hw *hw)
2160 if (ice_check_sq_alive(hw, &hw->adminq))
2161 ice_aq_clear_pxe_mode(hw);
2165 * ice_get_link_speed_based_on_phy_type - returns link speed
2166 * @phy_type_low: lower part of phy_type
2167 * @phy_type_high: higher part of phy_type
2169 * This helper function will convert an entry in PHY type structure
2170 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: In the structure of [phy_type_low, phy_type_high], there should
 * be exactly one bit set, as this function converts a single PHY type to
 * its corresponding link speed.
 * If no bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
 * If more than one bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
 */
static u16
2178 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2180 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2181 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2183 switch (phy_type_low) {
2184 case ICE_PHY_TYPE_LOW_100BASE_TX:
2185 case ICE_PHY_TYPE_LOW_100M_SGMII:
2186 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2188 case ICE_PHY_TYPE_LOW_1000BASE_T:
2189 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2190 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2191 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2192 case ICE_PHY_TYPE_LOW_1G_SGMII:
2193 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2195 case ICE_PHY_TYPE_LOW_2500BASE_T:
2196 case ICE_PHY_TYPE_LOW_2500BASE_X:
2197 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2198 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2200 case ICE_PHY_TYPE_LOW_5GBASE_T:
2201 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2202 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2204 case ICE_PHY_TYPE_LOW_10GBASE_T:
2205 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2206 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2207 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2208 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2209 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2210 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2211 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2213 case ICE_PHY_TYPE_LOW_25GBASE_T:
2214 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2215 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2216 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2217 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2218 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2219 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2220 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2221 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2222 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2223 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2224 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2226 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2227 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2228 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2229 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2230 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2231 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2232 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2234 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2235 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2236 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2237 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2238 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2239 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2240 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2241 case ICE_PHY_TYPE_LOW_50G_AUI2:
2242 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2243 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2244 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2245 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2246 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2247 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2248 case ICE_PHY_TYPE_LOW_50G_AUI1:
2249 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2251 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2252 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2253 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2254 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2255 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2256 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2257 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2258 case ICE_PHY_TYPE_LOW_100G_AUI4:
2259 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2260 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2261 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2262 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2263 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2264 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2267 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2271 switch (phy_type_high) {
2272 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2273 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2274 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2275 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2276 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2277 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2280 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2284 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2285 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2286 return ICE_AQ_LINK_SPEED_UNKNOWN;
2287 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2288 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2289 return ICE_AQ_LINK_SPEED_UNKNOWN;
2290 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2291 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2292 return speed_phy_type_low;
	return speed_phy_type_high;
}
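
/* Illustrative sketch (not part of the driver): callers pass exactly one
 * one-hot PHY type mask, per the note above. The helper name is
 * hypothetical.
 */
static u16 ice_example_speed_of_25g_cr(void)
{
	/* one bit set in phy_type_low; phy_type_high left clear */
	return ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_CR,
						    0);
}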
2298 * ice_update_phy_type
2299 * @phy_type_low: pointer to the lower part of phy_type
2300 * @phy_type_high: pointer to the higher part of phy_type
2301 * @link_speeds_bitmap: targeted link speeds bitmap
 * Note: For the link_speeds_bitmap structure, see
 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in the [phy_type_low, phy_type_high] structure represents a
 * certain link speed. This helper function turns on bits in
 * [phy_type_low, phy_type_high] based on the value of the
 * link_speeds_bitmap input parameter.
 */
void
2313 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2314 u16 link_speeds_bitmap)
2321 /* We first check with low part of phy_type */
2322 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2323 pt_low = BIT_ULL(index);
2324 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2326 if (link_speeds_bitmap & speed)
2327 *phy_type_low |= BIT_ULL(index);
2330 /* We then check with high part of phy_type */
2331 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2332 pt_high = BIT_ULL(index);
2333 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2335 if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}
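
/* Illustrative sketch (not part of the driver): converting a link speed
 * bitmap into the PHY type masks consumed by set-PHY-config. The helper
 * name is hypothetical.
 */
static void ice_example_phy_types_for_10g_25g(u64 *low, u64 *high)
{
	*low = 0;
	*high = 0;
	/* turns on every 10G and 25G PHY type bit listed in the tables above */
	ice_update_phy_type(low, high,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
}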
2341 * ice_aq_set_phy_cfg
2342 * @hw: pointer to the HW struct
2343 * @pi: port info structure of the interested logical port
2344 * @cfg: structure with PHY configuration data to be set
2345 * @cd: pointer to command details structure or NULL
2347 * Set the various PHY configuration parameters supported on the Port.
2348 * One or more of the Set PHY config parameters may be ignored in an MFP
2349 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
2353 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2354 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2356 struct ice_aq_desc desc;
2357 enum ice_status status;
	if (!cfg)
		return ICE_ERR_PARAM;
2362 /* Ensure that only valid bits of cfg->caps can be turned on. */
2363 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2364 ice_debug(hw, ICE_DBG_PHY,
2365 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2368 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2371 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2372 desc.params.set_phy.lport_num = pi->lport;
2373 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2375 ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
2376 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2377 ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
2378 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2379 ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
2380 ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl_an = 0x%x\n",
2381 cfg->low_power_ctrl_an);
2382 ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
2383 ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
2384 ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
2386 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2388 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2389 status = ICE_SUCCESS;
	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}
2398 * ice_update_link_info - update status of the HW network link
 * @pi: port information structure of the interested logical port
 */
2401 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2403 struct ice_link_status *li;
2404 enum ice_status status;
	if (!pi)
		return ICE_ERR_PARAM;
2409 li = &pi->phy.link_info;
	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;
2415 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2416 struct ice_aqc_get_phy_caps_data *pcaps;
2420 pcaps = (struct ice_aqc_get_phy_caps_data *)
2421 ice_malloc(hw, sizeof(*pcaps));
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
					     pcaps, NULL);
2427 if (status == ICE_SUCCESS)
2428 ice_memcpy(li->module_type, &pcaps->module_type,
2429 sizeof(li->module_type),
2430 ICE_NONDMA_TO_NONDMA);
2432 ice_free(hw, pcaps);
2439 * ice_cache_phy_user_req
2440 * @pi: port information structure
2441 * @cache_data: PHY logging data
2442 * @cache_mode: PHY logging mode
 * Log the user request on (FC, FEC, SPEED) for later use.
 */
static void
2447 ice_cache_phy_user_req(struct ice_port_info *pi,
2448 struct ice_phy_cache_mode_data cache_data,
2449 enum ice_phy_cache_mode cache_mode)
2454 switch (cache_mode) {
2456 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2458 case ICE_SPEED_MODE:
2459 pi->phy.curr_user_speed_req =
2460 cache_data.data.curr_user_speed_req;
2463 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2471 * ice_caps_to_fc_mode
2472 * @caps: PHY capabilities
2474 * Convert PHY FC capabilities to ice FC mode
2476 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2478 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2479 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2482 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2483 return ICE_FC_TX_PAUSE;
2485 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2486 return ICE_FC_RX_PAUSE;
2492 * ice_caps_to_fec_mode
2493 * @caps: PHY capabilities
2494 * @fec_options: Link FEC options
2496 * Convert PHY FEC capabilities to ice FEC mode
2498 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2500 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2501 return ICE_FEC_AUTO;
2503 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2504 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2505 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2506 ICE_AQC_PHY_FEC_25G_KR_REQ))
2507 return ICE_FEC_BASER;
2509 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2510 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2511 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
	return ICE_FEC_NONE;
}
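
/* Illustrative sketch (not part of the driver): decoding a get-PHY-caps
 * response into the driver's FC and FEC modes. The helper name is
 * hypothetical.
 */
static void
ice_example_decode_phy_caps(struct ice_hw *hw,
			    struct ice_aqc_get_phy_caps_data *pcaps)
{
	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
						     pcaps->link_fec_options);

	ice_debug(hw, ICE_DBG_PHY, "fc mode = %d, fec mode = %d\n", fc, fec);
}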
2517 static enum ice_status
2518 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2519 enum ice_fc_mode req_mode)
2521 struct ice_aqc_get_phy_caps_data *pcaps = NULL;
2522 struct ice_phy_cache_mode_data cache_data;
2523 enum ice_status status = ICE_SUCCESS;
2524 u8 pause_mask = 0x0;
	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;
2529 pcaps = (struct ice_aqc_get_phy_caps_data *)
2530 ice_malloc(pi->hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;
2534 /* Cache user FC request */
2535 cache_data.data.curr_user_fc_req = req_mode;
2536 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2540 /* Query the value of FC that both the NIC and attached media
2543 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2548 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2549 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2552 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2553 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2555 case ICE_FC_RX_PAUSE:
2556 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2558 case ICE_FC_TX_PAUSE:
2559 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2565 /* clear the old pause settings */
2566 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2567 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2569 /* set the new capabilities */
2570 cfg->caps |= pause_mask;
2573 ice_free(pi->hw, pcaps);
/**
 * ice_set_fc
 * @pi: port information structure
2580 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2581 * @ena_auto_link_update: enable automatic link update
2583 * Set the requested flow control mode.
2586 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2588 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2589 struct ice_aqc_get_phy_caps_data *pcaps;
2590 enum ice_status status;
2593 if (!pi || !aq_failures)
2594 return ICE_ERR_BAD_PTR;
2598 pcaps = (struct ice_aqc_get_phy_caps_data *)
2599 ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;
2603 /* Get the current PHY config */
2604 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2607 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2611 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2613 /* Configure the set phy data */
2614 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2616 if (status != ICE_ERR_BAD_PTR)
2617 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2622 /* If the capabilities have changed, then set the new config */
2623 if (cfg.caps != pcaps->caps) {
2624 int retry_count, retry_max = 10;
2626 /* Auto restart link so settings take effect */
2627 if (ena_auto_link_update)
2628 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2630 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2632 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2636 /* Update the link info
2637 * It sometimes takes a really long time for link to
2638 * come back from the atomic reset. Thus, we wait a
2641 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2642 status = ice_update_link_info(pi);
2644 if (status == ICE_SUCCESS)
2647 ice_msec_delay(100, true);
2651 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2655 ice_free(hw, pcaps);
2660 * ice_phy_caps_equals_cfg
2661 * @phy_caps: PHY capabilities
2662 * @phy_cfg: PHY configuration
 * Helper function to determine if PHY capabilities match the PHY
 * configuration.
 */
static bool
2668 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2669 struct ice_aqc_set_phy_cfg_data *phy_cfg)
2671 u8 caps_mask, cfg_mask;
2673 if (!phy_caps || !phy_cfg)
2676 /* These bits are not common between capabilities and configuration.
2677 * Do not use them to determine equality.
2679 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2680 ICE_AQC_PHY_EN_MOD_QUAL);
2681 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2683 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2684 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2685 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2686 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2687 phy_caps->eee_cap != phy_cfg->eee_cap ||
2688 phy_caps->eeer_value != phy_cfg->eeer_value ||
2689 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2696 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2697 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure.
 */
void
2705 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
2706 struct ice_aqc_get_phy_caps_data *caps,
2707 struct ice_aqc_set_phy_cfg_data *cfg)
2709 if (!pi || !caps || !cfg)
2712 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
2713 cfg->phy_type_low = caps->phy_type_low;
2714 cfg->phy_type_high = caps->phy_type_high;
2715 cfg->caps = caps->caps;
2716 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
2717 cfg->eee_cap = caps->eee_cap;
2718 cfg->eeer_value = caps->eeer_value;
2719 cfg->link_fec_opt = caps->link_fec_options;
2720 cfg->module_compliance_enforcement =
2721 caps->module_compliance_enforcement;
2723 if (ice_fw_supports_link_override(pi->hw)) {
2724 struct ice_link_default_override_tlv tlv;
2726 if (ice_get_link_default_override(&tlv, pi))
2729 if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
2730 cfg->module_compliance_enforcement |=
2731 ICE_LINK_OVERRIDE_STRICT_MODE;
2736 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2737 * @pi: port information structure
2738 * @cfg: PHY configuration data to set FEC mode
2739 * @fec: FEC mode to configure
2742 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2743 enum ice_fec_mode fec)
2745 struct ice_aqc_get_phy_caps_data *pcaps;
2746 enum ice_status status = ICE_SUCCESS;
	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;
2754 pcaps = (struct ice_aqc_get_phy_caps_data *)
2755 ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;
2759 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
2766 /* Clear RS bits, and AND BASE-R ability
2767 * bits and OR request bits.
2769 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2770 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2771 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2772 ICE_AQC_PHY_FEC_25G_KR_REQ;
2775 /* Clear BASE-R bits, and AND RS ability
2776 * bits and OR request bits.
2778 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2779 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2780 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2783 /* Clear all FEC option bits. */
2784 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2787 /* AND auto FEC bit, and all caps bits. */
2788 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
2789 cfg->link_fec_opt |= pcaps->link_fec_options;
2792 status = ICE_ERR_PARAM;
2796 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
2797 struct ice_link_default_override_tlv tlv;
2799 if (ice_get_link_default_override(&tlv, pi))
2802 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
2803 (tlv.options & ICE_LINK_OVERRIDE_EN))
2804 cfg->link_fec_opt = tlv.fec_options;
2808 ice_free(hw, pcaps);
2814 * ice_get_link_status - get status of the HW network link
2815 * @pi: port information structure
2816 * @link_up: pointer to bool (true/false = linkup/linkdown)
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
2822 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2824 struct ice_phy_info *phy_info;
2825 enum ice_status status = ICE_SUCCESS;
2827 if (!pi || !link_up)
2828 return ICE_ERR_PARAM;
2830 phy_info = &pi->phy;
2832 if (phy_info->get_link_info) {
2833 status = ice_update_link_info(pi);
2836 ice_debug(pi->hw, ICE_DBG_LINK,
2837 "get link status error, status = %d\n",
	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}
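
/* Illustrative usage sketch (not part of the driver): a conservative
 * link-up query that treats any query failure as link down. The helper
 * name is hypothetical.
 */
static bool ice_example_link_is_up(struct ice_port_info *pi)
{
	bool link_up = false;

	if (ice_get_link_status(pi, &link_up) != ICE_SUCCESS)
		return false;

	return link_up;
}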
2847 * ice_aq_set_link_restart_an
2848 * @pi: pointer to the port information structure
2849 * @ena_link: if true: enable link, if false: disable link
2850 * @cd: pointer to command details structure or NULL
2852 * Sets up the link and restarts the Auto-Negotiation over the link.
2855 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2856 struct ice_sq_cd *cd)
2858 struct ice_aqc_restart_an *cmd;
2859 struct ice_aq_desc desc;
2861 cmd = &desc.params.restart_an;
2863 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2865 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2866 cmd->lport_num = pi->lport;
2868 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2870 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2872 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2876 * ice_aq_set_event_mask
2877 * @hw: pointer to the HW struct
2878 * @port_num: port number of the physical function
2879 * @mask: event mask to be set
2880 * @cd: pointer to command details structure or NULL
2882 * Set event mask (0x0613)
2885 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2886 struct ice_sq_cd *cd)
2888 struct ice_aqc_set_event_mask *cmd;
2889 struct ice_aq_desc desc;
2891 cmd = &desc.params.set_event_mask;
2893 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2895 cmd->lport_num = port_num;
2897 cmd->event_mask = CPU_TO_LE16(mask);
2898 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2902 * ice_aq_set_mac_loopback
2903 * @hw: pointer to the HW struct
2904 * @ena_lpbk: Enable or Disable loopback
2905 * @cd: pointer to command details structure or NULL
2907 * Enable/disable loopback on a given port
2910 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2912 struct ice_aqc_set_mac_lb *cmd;
2913 struct ice_aq_desc desc;
2915 cmd = &desc.params.set_mac_lb;
2917 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2919 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2921 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2925 * ice_aq_set_port_id_led
2926 * @pi: pointer to the port information
2927 * @is_orig_mode: is this LED set to original mode (by the net-list)
2928 * @cd: pointer to command details structure or NULL
2930 * Set LED value for the given port (0x06e9)
2933 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2934 struct ice_sq_cd *cd)
2936 struct ice_aqc_set_port_id_led *cmd;
2937 struct ice_hw *hw = pi->hw;
2938 struct ice_aq_desc desc;
2940 cmd = &desc.params.set_port_id_led;
2942 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2945 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2947 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2949 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the EEPROM (typically 0xA0, 0 = topo default)
 * @mem_addr: I2C offset. The lower 8 bits are the address; the upper 8 bits
 *            must be zero.
 * @page: QSFP page to access
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: false for read, true for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
enum ice_status
2968 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
2969 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
2970 bool write, struct ice_sq_cd *cd)
2972 struct ice_aqc_sff_eeprom *cmd;
2973 struct ice_aq_desc desc;
2974 enum ice_status status;
2976 if (!data || (mem_addr & 0xff00))
2977 return ICE_ERR_PARAM;
2979 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
2980 cmd = &desc.params.read_write_sff_param;
2981 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
2982 cmd->lport_num = (u8)(lport & 0xff);
2983 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
2984 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
2985 ICE_AQC_SFF_I2CBUS_7BIT_M) |
2987 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
2988 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
2989 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
2990 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
2992 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}
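
/* Illustrative usage sketch (not part of the driver): reading the module
 * identifier byte (offset 0 of page 0) at the conventional 0xA0 bus
 * address. Passing lport = 0 without the valid bit relies on the topology
 * default port; the helper name is hypothetical.
 */
static enum ice_status ice_example_read_module_id(struct ice_hw *hw, u8 *id)
{
	return ice_aq_sff_eeprom(hw, 0, 0xA0, 0, 0, 0, id, 1, false, NULL);
}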
2999 * __ice_aq_get_set_rss_lut
3000 * @hw: pointer to the hardware structure
3001 * @vsi_id: VSI FW index
3002 * @lut_type: LUT table type
3003 * @lut: pointer to the LUT buffer provided by the caller
3004 * @lut_size: size of the LUT buffer
3005 * @glob_lut_idx: global LUT index
3006 * @set: set true to set the table, false to get the table
3008 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3010 static enum ice_status
3011 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
3012 u16 lut_size, u8 glob_lut_idx, bool set)
3014 struct ice_aqc_get_set_rss_lut *cmd_resp;
3015 struct ice_aq_desc desc;
3016 enum ice_status status;
3019 cmd_resp = &desc.params.get_set_rss_lut;
3022 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3023 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3025 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3028 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3029 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3030 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3031 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3034 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3035 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3036 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3037 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3038 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3041 status = ICE_ERR_PARAM;
3042 goto ice_aq_get_set_rss_lut_exit;
3045 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3046 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3047 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3050 goto ice_aq_get_set_rss_lut_send;
3051 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3053 goto ice_aq_get_set_rss_lut_send;
3055 goto ice_aq_get_set_rss_lut_send;
3058 /* LUT size is only valid for Global and PF table types */
3060 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3061 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3062 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3063 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3065 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3066 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3067 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3068 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3070 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3071 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3072 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3073 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3074 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3079 status = ICE_ERR_PARAM;
3080 goto ice_aq_get_set_rss_lut_exit;
3083 ice_aq_get_set_rss_lut_send:
3084 cmd_resp->flags = CPU_TO_LE16(flags);
3085 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3087 ice_aq_get_set_rss_lut_exit:
3092 * ice_aq_get_rss_lut
3093 * @hw: pointer to the hardware structure
3094 * @vsi_handle: software VSI handle
3095 * @lut_type: LUT table type
3096 * @lut: pointer to the LUT buffer provided by the caller
3097 * @lut_size: size of the LUT buffer
3099 * get the RSS lookup table, PF or VSI type
3102 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3103 u8 *lut, u16 lut_size)
3105 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3106 return ICE_ERR_PARAM;
3108 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, false);
}
3113 * ice_aq_set_rss_lut
3114 * @hw: pointer to the hardware structure
3115 * @vsi_handle: software VSI handle
3116 * @lut_type: LUT table type
3117 * @lut: pointer to the LUT buffer provided by the caller
3118 * @lut_size: size of the LUT buffer
3120 * set the RSS lookup table, PF or VSI type
3123 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3124 u8 *lut, u16 lut_size)
3126 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3127 return ICE_ERR_PARAM;
3129 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, true);
}
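
/* Illustrative usage sketch (not part of the driver): programming a
 * 128-entry PF LUT that spreads traffic round-robin across num_rxq
 * queues. The helper name and the literal table size are assumptions
 * made for clarity.
 */
static enum ice_status
ice_example_init_pf_lut(struct ice_hw *hw, u16 vsi_handle, u16 num_rxq)
{
	u8 lut[128];
	u16 i;

	if (!num_rxq)
		return ICE_ERR_PARAM;

	for (i = 0; i < sizeof(lut); i++)
		lut[i] = (u8)(i % num_rxq);

	return ice_aq_set_rss_lut(hw, vsi_handle,
				  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				  lut, sizeof(lut));
}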
3134 * __ice_aq_get_set_rss_key
3135 * @hw: pointer to the HW struct
3136 * @vsi_id: VSI FW index
3137 * @key: pointer to key info struct
3138 * @set: set true to set the key, false to get the key
3140 * get (0x0B04) or set (0x0B02) the RSS key per VSI
static enum
ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3144 struct ice_aqc_get_set_rss_keys *key,
3147 struct ice_aqc_get_set_rss_key *cmd_resp;
3148 u16 key_size = sizeof(*key);
3149 struct ice_aq_desc desc;
3151 cmd_resp = &desc.params.get_set_rss_key;
3154 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3155 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3157 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3160 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3161 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3162 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3163 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3165 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3169 * ice_aq_get_rss_key
3170 * @hw: pointer to the HW struct
3171 * @vsi_handle: software VSI handle
3172 * @key: pointer to key info struct
3174 * get the RSS key per VSI
3177 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3178 struct ice_aqc_get_set_rss_keys *key)
3180 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3181 return ICE_ERR_PARAM;
	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}
3188 * ice_aq_set_rss_key
3189 * @hw: pointer to the HW struct
3190 * @vsi_handle: software VSI handle
3191 * @keys: pointer to key info struct
3193 * set the RSS key per VSI
3196 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3197 struct ice_aqc_get_set_rss_keys *keys)
3199 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3200 return ICE_ERR_PARAM;
	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}
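
/* Illustrative sketch (not part of the driver): copying the RSS key from
 * one VSI to another through the get/set wrappers above. The helper name
 * is hypothetical.
 */
static enum ice_status
ice_example_clone_rss_key(struct ice_hw *hw, u16 src_vsi, u16 dst_vsi)
{
	struct ice_aqc_get_set_rss_keys keys;
	enum ice_status status;

	status = ice_aq_get_rss_key(hw, src_vsi, &keys);
	if (status)
		return status;

	return ice_aq_set_rss_key(hw, dst_vsi, &keys);
}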
3207 * ice_aq_add_lan_txq
3208 * @hw: pointer to the hardware structure
3209 * @num_qgrps: Number of added queue groups
3210 * @qg_list: list of queue groups to be added
3211 * @buf_size: size of buffer for indirect command
3212 * @cd: pointer to command details structure or NULL
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After the add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of the Add LAN Tx
 * queue AQ command.
 */
static enum ice_status
3228 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3229 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3230 struct ice_sq_cd *cd)
3232 u16 i, sum_header_size, sum_q_size = 0;
3233 struct ice_aqc_add_tx_qgrp *list;
3234 struct ice_aqc_add_txqs *cmd;
3235 struct ice_aq_desc desc;
3237 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3239 cmd = &desc.params.add_txqs;
3241 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
	if (!qg_list)
		return ICE_ERR_PARAM;
3246 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3247 return ICE_ERR_PARAM;
3249 sum_header_size = num_qgrps *
3250 (sizeof(*qg_list) - sizeof(*qg_list->txqs));
	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
3254 struct ice_aqc_add_txqs_perq *q = list->txqs;
3256 sum_q_size += list->num_txqs * sizeof(*q);
3257 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
3260 if (buf_size != (sum_header_size + sum_q_size))
3261 return ICE_ERR_PARAM;
3263 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3265 cmd->num_qgrps = num_qgrps;
	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
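
/* Illustrative sketch (not part of the driver): the buffer-size contract
 * enforced above. Each queue group contributes a header (the group struct
 * minus its trailing per-queue element) plus one per-queue element per Tx
 * queue. The helper name is hypothetical.
 */
static u16 ice_example_add_txq_buf_size(u8 num_txqs)
{
	return (u16)(sizeof(struct ice_aqc_add_tx_qgrp) -
		     sizeof(struct ice_aqc_add_txqs_perq) +
		     num_txqs * sizeof(struct ice_aqc_add_txqs_perq));
}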
3271 * ice_aq_dis_lan_txq
3272 * @hw: pointer to the hardware structure
3273 * @num_qgrps: number of groups in the list
3274 * @qg_list: the list of groups to disable
3275 * @buf_size: the total size of the qg_list buffer in bytes
3276 * @rst_src: if called due to reset, specifies the reset source
3277 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3278 * @cd: pointer to command details structure or NULL
3280 * Disable LAN Tx queue (0x0C31)
3282 static enum ice_status
3283 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3284 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3285 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3286 struct ice_sq_cd *cd)
3288 struct ice_aqc_dis_txqs *cmd;
3289 struct ice_aq_desc desc;
3290 enum ice_status status;
3293 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3294 cmd = &desc.params.dis_txqs;
3295 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3297 /* qg_list can be NULL only in VM/VF reset flow */
3298 if (!qg_list && !rst_src)
3299 return ICE_ERR_PARAM;
3301 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3302 return ICE_ERR_PARAM;
3304 cmd->num_entries = num_qgrps;
3306 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3307 ICE_AQC_Q_DIS_TIMEOUT_M);
3311 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3312 cmd->vmvf_and_timeout |=
3313 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3320 /* flush pipe on time out */
3321 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3322 /* If no queue group info, we are in a reset flow. Issue the AQ */
3326 /* set RD bit to indicate that command buffer is provided by the driver
3327 * and it needs to be read by the firmware
3329 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3331 for (i = 0; i < num_qgrps; ++i) {
3332 /* Calculate the size taken up by the queue IDs in this group */
3333 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
3335 /* Add the size of the group header */
3336 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
3338 /* If the num of queues is even, add 2 bytes of padding */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;
3347 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3350 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3351 vmvf_num, hw->adminq.sq_last_status);
3353 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3354 LE16_TO_CPU(qg_list[0].q_id[0]),
3355 hw->adminq.sq_last_status);
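
/* Illustrative sketch (not part of the driver): the per-group sizing rule
 * validated above. A group carries its header plus one 16-bit queue ID
 * per queue, padded by 2 bytes when num_qs is even so that each group
 * stays 4-byte aligned. The helper name is hypothetical.
 */
static u16 ice_example_dis_txq_grp_size(const struct ice_aqc_dis_txq_item *qg)
{
	u16 sz = (u16)(sizeof(*qg) - sizeof(qg->q_id) +
		       qg->num_qs * sizeof(qg->q_id));

	if ((qg->num_qs % 2) == 0)
		sz += 2;

	return sz;
}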
3361 * ice_aq_move_recfg_lan_txq
3362 * @hw: pointer to the hardware structure
3363 * @num_qs: number of queues to move/reconfigure
3364 * @is_move: true if this operation involves node movement
3365 * @is_tc_change: true if this operation involves a TC change
3366 * @subseq_call: true if this operation is a subsequent call
3367 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3368 * @timeout: timeout in units of 100 usec (valid values 0-50)
3369 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3370 * @buf: struct containing src/dest TEID and per-queue info
3371 * @buf_size: size of buffer for indirect command
3372 * @txqs_moved: out param, number of queues successfully moved
3373 * @cd: pointer to command details structure or NULL
3375 * Move / Reconfigure Tx LAN queues (0x0C32)
3378 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3379 bool is_tc_change, bool subseq_call, bool flush_pipe,
3380 u8 timeout, u32 *blocked_cgds,
3381 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3382 u8 *txqs_moved, struct ice_sq_cd *cd)
3384 struct ice_aqc_move_txqs *cmd;
3385 struct ice_aq_desc desc;
3386 enum ice_status status;
3388 cmd = &desc.params.move_txqs;
3389 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3391 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3392 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3393 return ICE_ERR_PARAM;
3395 if (is_tc_change && !flush_pipe && !blocked_cgds)
3396 return ICE_ERR_PARAM;
3398 if (!is_move && !is_tc_change)
3399 return ICE_ERR_PARAM;
3401 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3404 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3407 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3410 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3413 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3415 cmd->num_qs = num_qs;
3416 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3417 ICE_AQC_Q_CMD_TIMEOUT_M);
3419 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3421 if (!status && txqs_moved)
3422 *txqs_moved = cmd->num_qs;
3424 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3425 is_tc_change && !flush_pipe)
3426 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3431 /* End of FW Admin Queue command wrappers */
3434 * ice_write_byte - write a byte to a packed context structure
3435 * @src_ctx: the context structure to read from
3436 * @dest_ctx: the context to be written to
3437 * @ce_info: a description of the struct to be filled
3440 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3442 u8 src_byte, dest_byte, mask;
3446 /* copy from the next struct field */
3447 from = src_ctx + ce_info->offset;
3449 /* prepare the bits and mask */
3450 shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;
3456 /* shift to correct alignment */
3457 mask <<= shift_width;
3458 src_byte <<= shift_width;
3460 /* get the current bits from the target bit string */
3461 dest = dest_ctx + (ce_info->lsb / 8);
3463 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3465 dest_byte &= ~mask; /* get the bits not changing */
3466 dest_byte |= src_byte; /* add in the new bits */
3468 /* put it all back */
3469 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3473 * ice_write_word - write a word to a packed context structure
3474 * @src_ctx: the context structure to read from
3475 * @dest_ctx: the context to be written to
3476 * @ce_info: a description of the struct to be filled
3479 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3486 /* copy from the next struct field */
3487 from = src_ctx + ce_info->offset;
3489 /* prepare the bits and mask */
3490 shift_width = ce_info->lsb % 8;
3491 mask = BIT(ce_info->width) - 1;
3493 /* don't swizzle the bits until after the mask because the mask bits
3494 * will be in a different bit position on big endian machines
	src_word = *(u16 *)from;
	src_word &= mask;
3499 /* shift to correct alignment */
3500 mask <<= shift_width;
3501 src_word <<= shift_width;
3503 /* get the current bits from the target bit string */
3504 dest = dest_ctx + (ce_info->lsb / 8);
3506 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3508 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
3509 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
3511 /* put it all back */
3512 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3516 * ice_write_dword - write a dword to a packed context structure
3517 * @src_ctx: the context structure to read from
3518 * @dest_ctx: the context to be written to
3519 * @ce_info: a description of the struct to be filled
3522 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3524 u32 src_dword, mask;
3529 /* copy from the next struct field */
3530 from = src_ctx + ce_info->offset;
3532 /* prepare the bits and mask */
3533 shift_width = ce_info->lsb % 8;
	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits, so the shift will do nothing
	 */
3539 if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;
3544 /* don't swizzle the bits until after the mask because the mask bits
3545 * will be in a different bit position on big endian machines
	src_dword = *(u32 *)from;
	src_dword &= mask;
3550 /* shift to correct alignment */
3551 mask <<= shift_width;
3552 src_dword <<= shift_width;
3554 /* get the current bits from the target bit string */
3555 dest = dest_ctx + (ce_info->lsb / 8);
3557 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3559 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
3560 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
3562 /* put it all back */
3563 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3567 * ice_write_qword - write a qword to a packed context structure
3568 * @src_ctx: the context structure to read from
3569 * @dest_ctx: the context to be written to
3570 * @ce_info: a description of the struct to be filled
3573 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3575 u64 src_qword, mask;
3580 /* copy from the next struct field */
3581 from = src_ctx + ce_info->offset;
3583 /* prepare the bits and mask */
3584 shift_width = ce_info->lsb % 8;
	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits, so the shift will do nothing
	 */
3590 if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;
3595 /* don't swizzle the bits until after the mask because the mask bits
3596 * will be in a different bit position on big endian machines
	src_qword = *(u64 *)from;
	src_qword &= mask;
3601 /* shift to correct alignment */
3602 mask <<= shift_width;
3603 src_qword <<= shift_width;
3605 /* get the current bits from the target bit string */
3606 dest = dest_ctx + (ce_info->lsb / 8);
3608 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
3610 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
3611 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
3613 /* put it all back */
3614 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3618 * ice_set_ctx - set context bits in packed structure
3619 * @hw: pointer to the hardware structure
3620 * @src_ctx: pointer to a generic non-packed context structure
3621 * @dest_ctx: pointer to memory for the packed structure
3622 * @ce_info: a description of the structure to be transformed
3625 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
3626 const struct ice_ctx_ele *ce_info)
3630 for (f = 0; ce_info[f].width; f++) {
3631 /* We have to deal with each element of the FW response
3632 * using the correct size so that we are correct regardless
3633 * of the endianness of the machine.
3635 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
3636 ice_debug(hw, ICE_DBG_QCTX,
3637 "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
3638 f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}

		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return ICE_SUCCESS;
}
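
/* Illustrative sketch (not part of the driver): packing a one-field
 * context with ice_set_ctx(). The struct, table, and helper below are
 * hypothetical; the real driver builds its ice_ctx_ele tables with the
 * ICE_CTX_STORE macro. Here a 3-bit value lands at bits [4:2] of byte 0
 * of the packed buffer.
 */
struct ice_example_ctx {
	u8 prof;
};

static const struct ice_ctx_ele ice_example_ctx_info[] = {
	{ .offset = 0, .size_of = sizeof(u8), .width = 3, .lsb = 2 },
	{ 0 } /* a width of 0 terminates the table */
};

static enum ice_status
ice_example_pack_ctx(struct ice_hw *hw, u8 *dest, u16 dest_len)
{
	struct ice_example_ctx ctx = { .prof = 5 };

	ice_memset(dest, 0, dest_len, ICE_NONDMA_MEM);
	/* writes (5 << 2) into dest[0]; all other bits are preserved */
	return ice_set_ctx(hw, (u8 *)&ctx, dest, ice_example_ctx_info);
}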
3663 * ice_read_byte - read context byte into struct
3664 * @src_ctx: the context structure to read from
3665 * @dest_ctx: the context to be written to
3666 * @ce_info: a description of the struct to be filled
3669 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3675 /* prepare the bits and mask */
3676 shift_width = ce_info->lsb % 8;
3677 mask = (u8)(BIT(ce_info->width) - 1);
3679 /* shift to correct alignment */
3680 mask <<= shift_width;
3682 /* get the current bits from the src bit string */
3683 src = src_ctx + (ce_info->lsb / 8);
3685 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3687 dest_byte &= ~(mask);
3689 dest_byte >>= shift_width;
3691 /* get the address from the struct field */
3692 target = dest_ctx + ce_info->offset;
3694 /* put it back in the struct */
3695 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3699 * ice_read_word - read context word into struct
3700 * @src_ctx: the context structure to read from
3701 * @dest_ctx: the context to be written to
3702 * @ce_info: a description of the struct to be filled
3705 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3707 u16 dest_word, mask;
3712 /* prepare the bits and mask */
3713 shift_width = ce_info->lsb % 8;
3714 mask = BIT(ce_info->width) - 1;
3716 /* shift to correct alignment */
3717 mask <<= shift_width;
3719 /* get the current bits from the src bit string */
3720 src = src_ctx + (ce_info->lsb / 8);
3722 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_word &= ~(CPU_TO_LE16(mask));
3729 /* get the data back into host order before shifting */
3730 dest_word = LE16_TO_CPU(src_word);
3732 dest_word >>= shift_width;
3734 /* get the address from the struct field */
3735 target = dest_ctx + ce_info->offset;
3737 /* put it back in the struct */
3738 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3742 * ice_read_dword - read context dword into struct
3743 * @src_ctx: the context structure to read from
3744 * @dest_ctx: the context to be written to
3745 * @ce_info: a description of the struct to be filled
3748 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3750 u32 dest_dword, mask;
3755 /* prepare the bits and mask */
3756 shift_width = ce_info->lsb % 8;
	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits, so the shift will do nothing
	 */
3762 if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;
3767 /* shift to correct alignment */
3768 mask <<= shift_width;
3770 /* get the current bits from the src bit string */
3771 src = src_ctx + (ce_info->lsb / 8);
3773 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_dword &= ~(CPU_TO_LE32(mask));
3780 /* get the data back into host order before shifting */
3781 dest_dword = LE32_TO_CPU(src_dword);
3783 dest_dword >>= shift_width;
3785 /* get the address from the struct field */
3786 target = dest_ctx + ce_info->offset;
3788 /* put it back in the struct */
3789 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3793 * ice_read_qword - read context qword into struct
3794 * @src_ctx: the context structure to read from
3795 * @dest_ctx: the context to be written to
3796 * @ce_info: a description of the struct to be filled
3799 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3801 u64 dest_qword, mask;
3806 /* prepare the bits and mask */
3807 shift_width = ce_info->lsb % 8;
	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits, so the shift will do nothing
	 */
3813 if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;
3818 /* shift to correct alignment */
3819 mask <<= shift_width;
3821 /* get the current bits from the src bit string */
3822 src = src_ctx + (ce_info->lsb / 8);
3824 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_qword &= ~(CPU_TO_LE64(mask));
3831 /* get the data back into host order before shifting */
3832 dest_qword = LE64_TO_CPU(src_qword);
3834 dest_qword >>= shift_width;
3836 /* get the address from the struct field */
3837 target = dest_ctx + ce_info->offset;
3839 /* put it back in the struct */
3840 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3844 * ice_get_ctx - extract context bits from a packed structure
3845 * @src_ctx: pointer to a generic packed context structure
3846 * @dest_ctx: pointer to a generic non-packed context structure
3847 * @ce_info: a description of the structure to be read from
3850 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3854 for (f = 0; ce_info[f].width; f++) {
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return ICE_SUCCESS;
}
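
/* Illustrative sketch (not part of the driver): unpacking with
 * ice_get_ctx() using the hypothetical table defined above; this recovers
 * the 3-bit value that ice_example_pack_ctx() wrote.
 */
static u8 ice_example_unpack_ctx(u8 *packed)
{
	struct ice_example_ctx ctx = { 0 };

	ice_get_ctx(packed, (u8 *)&ctx,
		    (struct ice_ctx_ele *)ice_example_ctx_info);

	return ctx.prof;
}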
3878 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3879 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
3885 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3887 struct ice_vsi_ctx *vsi;
3888 struct ice_q_ctx *q_ctx;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}
/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
3907 * @num_qgrps: Number of added queue groups
3908 * @buf: list of queue groups to be added
3909 * @buf_size: size of buffer for indirect command
3910 * @cd: pointer to command details structure or NULL
 * This function adds one LAN queue.
 */
enum ice_status
3915 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3916 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3917 struct ice_sq_cd *cd)
3919 struct ice_aqc_txsched_elem_data node = { 0 };
3920 struct ice_sched_node *parent;
3921 struct ice_q_ctx *q_ctx;
3922 enum ice_status status;
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;
3928 if (num_qgrps > 1 || buf->num_txqs > 1)
3929 return ICE_ERR_MAX_LIMIT;
3933 if (!ice_is_vsi_valid(hw, vsi_handle))
3934 return ICE_ERR_PARAM;
3936 ice_acquire_lock(&pi->sched_lock);
3938 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3940 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3942 status = ICE_ERR_PARAM;
3946 /* find a parent node */
3947 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3948 ICE_SCHED_NODE_OWNER_LAN);
3950 status = ICE_ERR_PARAM;
3954 buf->parent_teid = parent->info.node_teid;
3955 node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
3967 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3969 /* add the LAN queue */
3970 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3971 if (status != ICE_SUCCESS) {
3972 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3973 LE16_TO_CPU(buf->txqs[0].txq_id),
3974 hw->adminq.sq_last_status);
3978 node.node_teid = buf->txqs[0].q_teid;
3979 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3980 q_ctx->q_handle = q_handle;
3981 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
3983 /* add a leaf node into scheduler tree queue layer */
3984 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3986 status = ice_sched_replay_q_bw(pi, q_ctx);
3989 ice_release_lock(&pi->sched_lock);
/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
3999 * @q_handles: pointer to software queue handle array
4000 * @q_ids: pointer to the q_id array
4001 * @q_teids: pointer to queue node teids
4002 * @rst_src: if called due to reset, specifies the reset source
4003 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4004 * @cd: pointer to command details structure or NULL
 * This function removes queues and their corresponding nodes in SW DB.
 */
enum ice_status
4009 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4010 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4011 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4012 struct ice_sq_cd *cd)
4014 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4015 struct ice_aqc_dis_txq_item qg_list;
4016 struct ice_q_ctx *q_ctx;
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;
	/* If the queue is already disabled but the disable queue command
	 * still has to be sent to complete the VF reset, then call
	 * ice_aq_dis_lan_txq without any queue information.
	 */
		return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
					  vmvf_num, NULL);
4033 ice_acquire_lock(&pi->sched_lock);
4035 for (i = 0; i < num_queues; i++) {
4036 struct ice_sched_node *node;
		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
4041 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4047 if (q_ctx->q_handle != q_handles[i]) {
4048 ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4049 q_ctx->q_handle, q_handles[i]);
4052 qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
4055 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), rst_src, vmvf_num,
					    cd);
		if (status != ICE_SUCCESS)
			break;
4061 ice_free_sched_node(pi, node);
4062 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4064 ice_release_lock(&pi->sched_lock);
/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = ICE_SUCCESS;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}
/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}
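/* Illustrative sketch only: requesting one LAN queue on every enabled TC.
 * ice_cfg_vsi_qs() skips TCs whose scheduler nodes are absent, so passing
 * a fully populated per-TC array is safe; the "example_" name is
 * hypothetical.
 */
#ifdef ICE_EXAMPLE_SNIPPETS
static enum ice_status
example_cfg_one_lanq_per_tc(struct ice_port_info *pi, u16 vsi_handle,
			    u16 tc_bitmap)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS];
	u8 i;

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++)
		max_lanqs[i] = 1;
	return ice_cfg_vsi_lan(pi, vsi_handle, tc_bitmap, max_lanqs);
}
#endif /* ICE_EXAMPLE_SNIPPETS */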
/**
 * ice_is_main_vsi - checks whether the VSI is main VSI
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * Checks whether the VSI is the main VSI (the first PF VSI created on
 * the given PF).
 */
static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
}
/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function initializes filters
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
{
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_sw_replay_rule_info(hw, sw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return ice_sched_replay_tc_node_bw(hw->port_info);
}
/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_port_info *pi = hw->port_info;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (ice_is_main_vsi(hw, vsi_handle)) {
		status = ice_replay_pre_init(hw, sw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}
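/* Illustrative sketch only: replaying every valid VSI after a reset while
 * honoring the "main VSI first" requirement documented above, then running
 * the post-replay cleanup. The "example_" name and the ICE_MAX_VSI loop
 * bound are assumptions of this sketch.
 */
#ifdef ICE_EXAMPLE_SNIPPETS
static enum ice_status example_replay_all_vsi(struct ice_hw *hw)
{
	enum ice_status status;
	u16 i;

	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
	if (status)
		return status;

	for (i = 0; i < ICE_MAX_VSI; i++) {
		if (i == ICE_MAIN_VSI_HANDLE || !ice_is_vsi_valid(hw, i))
			continue;
		status = ice_replay_vsi(hw, i);
		if (status)
			return status;
	}

	ice_replay_post(hw);
	return ICE_SUCCESS;
}
#endif /* ICE_EXAMPLE_SNIPPETS */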
/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}
/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
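/* Worked example of the roll-over branch above: with BIT_ULL(40) as the
 * modulus, prev_stat = 0xFFFFFFFFF0 and new_data = 0x10 yield a delta of
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20. The sketch below shows the
 * intended call pattern; GLPRT_GORCL() is the low half of the 40-bit
 * good-octets-received port counter, and pairing it with this helper is an
 * assumption of the sketch.
 */
#ifdef ICE_EXAMPLE_SNIPPETS
static void
example_update_port_rx_bytes(struct ice_hw *hw, u8 port, bool loaded,
			     u64 *prev, u64 *total)
{
	ice_stat_update40(hw, GLPRT_GORCL(port), loaded, prev, total);
}
#endif /* ICE_EXAMPLE_SNIPPETS */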
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
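/* Illustrative sketch only: the 32-bit variant follows the same first-read
 * and wrap handling with a BIT_ULL(32) modulus. GLPRT_RLEC() (receive
 * length error count) is a 32-bit port counter; pairing it with this
 * helper is an assumption of the sketch.
 */
#ifdef ICE_EXAMPLE_SNIPPETS
static void
example_update_rx_len_errors(struct ice_hw *hw, u8 port, bool loaded,
			     u64 *prev, u64 *total)
{
	ice_stat_update32(hw, GLPRT_RLEC(port), loaded, prev, total);
}
#endif /* ICE_EXAMPLE_SNIPPETS */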
/**
 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
 * @hw: ptr to the hardware info
 * @vsi_handle: VSI handle
 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
 * @cur_stats: ptr to current stats structure
 *
 * The GLV_REPC statistic register actually tracks two 16bit statistics, and
 * thus cannot be read using the normal ice_stat_update32 function.
 *
 * Read the GLV_REPC register associated with the given VSI, and update the
 * rx_no_desc and rx_error values in the ice_eth_stats structure.
 *
 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
 * cleared each time it's read.
 *
 * Note that the GLV_RDPC register also counts the causes that would trigger
 * GLV_REPC. However, it does not give the finer grained detail about why the
 * packets are being dropped. The GLV_REPC values can be used to distinguish
 * whether Rx packets are dropped due to errors or due to no available
 * descriptors.
 */
void
ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
		     struct ice_eth_stats *cur_stats)
{
	u16 vsi_num, no_desc, error_cnt;
	u32 repc;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return;

	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	/* If we haven't loaded stats yet, just clear the current value */
	if (!prev_stat_loaded) {
		wr32(hw, GLV_REPC(vsi_num), 0);
		return;
	}

	repc = rd32(hw, GLV_REPC(vsi_num));
	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;

	/* Clear the count by writing to the stats register */
	wr32(hw, GLV_REPC(vsi_num), 0);

	cur_stats->rx_no_desc += no_desc;
	cur_stats->rx_errors += error_cnt;
}
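/* Illustrative sketch only: combining the packed GLV_REPC error counters
 * with a 40-bit per-VSI byte counter in one refresh pass. Because GLV_REPC
 * sticks at 0xFFFF and is cleared on each read, it should be polled often
 * enough that neither 16-bit field saturates between reads. GLV_GORCL() is
 * the low half of the per-VSI good-octets-received counter; pairing it
 * with ice_stat_update40() is an assumption of the sketch.
 */
#ifdef ICE_EXAMPLE_SNIPPETS
static void
example_refresh_vsi_rx_stats(struct ice_hw *hw, u16 vsi_handle, bool loaded,
			     u64 *prev_bytes, struct ice_eth_stats *stats)
{
	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	ice_stat_update40(hw, GLV_GORCL(vsi_num), loaded, prev_bytes,
			  &stats->rx_bytes);
	ice_stat_update_repc(hw, vsi_handle, loaded, stats);
}
#endif /* ICE_EXAMPLE_SNIPPETS */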
/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
	buf->generic[0].node_teid = CPU_TO_LE32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status != ICE_SUCCESS || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}
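/* Illustrative sketch only: querying the scheduler element behind a saved
 * queue TEID and returning its element type (for example, to confirm it is
 * still ICE_AQC_ELEM_TYPE_LEAF); the "example_" name is hypothetical.
 */
#ifdef ICE_EXAMPLE_SNIPPETS
static enum ice_status
example_query_elem_type(struct ice_hw *hw, u32 node_teid, u8 *elem_type)
{
	struct ice_aqc_get_elem buf;
	enum ice_status status;

	status = ice_sched_query_elem(hw, node_teid, &buf);
	if (!status)
		*elem_type = buf.generic[0].data.elem_type;
	return status;
}
#endif /* ICE_EXAMPLE_SNIPPETS */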
/**
 * ice_get_fw_mode - returns FW mode
 * @hw: pointer to the HW struct
 */
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
{
#define ICE_FW_MODE_DBG_M BIT(0)
#define ICE_FW_MODE_REC_M BIT(1)
#define ICE_FW_MODE_ROLLBACK_M BIT(2)
	u32 fw_mode;

	/* check the current FW mode */
	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
	if (fw_mode & ICE_FW_MODE_DBG_M)
		return ICE_FW_MODE_DBG;
	else if (fw_mode & ICE_FW_MODE_REC_M)
		return ICE_FW_MODE_REC;
	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
		return ICE_FW_MODE_ROLLBACK;
	else
		return ICE_FW_MODE_NORMAL;
}
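/* Illustrative sketch only: one possible init-time policy built on
 * ice_get_fw_mode(), treating recovery and rollback modes as unusable.
 * Whether a driver should continue in debug mode is a policy choice this
 * sketch assumes, not something the helper above dictates.
 */
#ifdef ICE_EXAMPLE_SNIPPETS
static bool example_fw_mode_usable(struct ice_hw *hw)
{
	switch (ice_get_fw_mode(hw)) {
	case ICE_FW_MODE_NORMAL:
	case ICE_FW_MODE_DBG:
		return true;
	default: /* ICE_FW_MODE_REC, ICE_FW_MODE_ROLLBACK */
		return false;
	}
}
#endif /* ICE_EXAMPLE_SNIPPETS */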
/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	/* Currently, only supported for E810 devices */
	if (hw->mac_type != ICE_MAC_E810)
		return false;
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}
	return false;
}
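/* Illustrative sketch only: the tiered major/minor/patch comparison above
 * generalizes to any API-version floor. This helper mirrors that branch
 * structure as a plain "at least" test; the "example_" name is
 * hypothetical.
 */
#ifdef ICE_EXAMPLE_SNIPPETS
static bool
example_api_ver_at_least(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver != maj)
		return hw->api_maj_ver > maj;
	if (hw->api_min_ver != min)
		return hw->api_min_ver > min;
	return hw->api_patch >= patch;
}
#endif /* ICE_EXAMPLE_SNIPPETS */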
/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));