1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_common.h"
7 #include "ice_adminq_cmd.h"
10 #include "ice_switch.h"
12 #define ICE_PF_RESET_WAIT_COUNT 200
14 #define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
15 wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
16 ((ICE_RX_OPC_MDID << \
17 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
18 GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
19 (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
20 GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
22 #define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
23 wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
24 (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
25 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
26 (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
27 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
28 (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
29 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
30 (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
31 GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
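/*
 * Illustrative only: the programming helpers above are used further down in
 * ice_init_flex_flds()/ice_init_flex_flags(). For example, programming flex
 * word 2 of the ICE_RXDID_FLEX_NIC profile with the lower flow ID metadata:
 *
 *	ICE_PROG_FLEX_ENTRY(hw, ICE_RXDID_FLEX_NIC, ICE_MDID_FLOW_ID_LOWER, 2);
 *
 * writes ICE_RX_OPC_MDID into the RXDID_OPCODE field and the MDID into the
 * PROT_MDID field of GLFLXP_RXDID_FLX_WRD_2 for that profile.
 */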
35 * ice_set_mac_type - Sets MAC type
36 * @hw: pointer to the HW structure
38 * This function sets the MAC type of the adapter based on the
39 * vendor ID and device ID stored in the HW structure.
41 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
43 enum ice_status status = ICE_SUCCESS;
45 ice_debug(hw, ICE_DBG_TRACE, "ice_set_mac_type\n");
47 if (hw->vendor_id == ICE_INTEL_VENDOR_ID) {
48 switch (hw->device_id) {
50 hw->mac_type = ICE_MAC_GENERIC;
54 status = ICE_ERR_DEVICE_NOT_SUPPORTED;
57 ice_debug(hw, ICE_DBG_INIT, "found mac_type: %d, status: %d\n",
58 hw->mac_type, status);
65 * ice_clear_pf_cfg - Clear PF configuration
66 * @hw: pointer to the hardware structure
68 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
69 * configuration, flow director filters, etc.).
71 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
73 struct ice_aq_desc desc;
75 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
77 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
81 * ice_aq_manage_mac_read - manage MAC address read command
82 * @hw: pointer to the HW struct
83 * @buf: a virtual buffer to hold the manage MAC read response
84 * @buf_size: Size of the virtual buffer
85 * @cd: pointer to command details structure or NULL
87 * This function is used to return the per-PF station MAC address (0x0107).
88 * NOTE: Upon successful completion of this command, MAC address information
89 * is returned in the user-specified buffer, which should be interpreted
90 * as a "manage_mac_read" response.
91 * Responses such as the various MAC addresses are stored in the HW struct (port.mac).
92 * ice_aq_discover_caps is expected to be called before this function is called.
94 static enum ice_status
95 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
98 struct ice_aqc_manage_mac_read_resp *resp;
99 struct ice_aqc_manage_mac_read *cmd;
100 struct ice_aq_desc desc;
101 enum ice_status status;
105 cmd = &desc.params.mac_read;
107 if (buf_size < sizeof(*resp))
108 return ICE_ERR_BUF_TOO_SHORT;
110 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
112 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
116 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
117 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
119 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
120 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
124 /* A single port can report up to two (LAN and WoL) addresses */
125 for (i = 0; i < cmd->num_addr; i++)
126 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
127 ice_memcpy(hw->port_info->mac.lan_addr,
128 resp[i].mac_addr, ETH_ALEN,
130 ice_memcpy(hw->port_info->mac.perm_addr,
132 ETH_ALEN, ICE_DMA_TO_NONDMA);
140 * ice_aq_get_phy_caps - returns PHY capabilities
141 * @pi: port information structure
142 * @qual_mods: report qualified modules
143 * @report_mode: report mode capabilities
144 * @pcaps: structure for PHY capabilities to be filled
145 * @cd: pointer to command details structure or NULL
147 * Returns the various PHY capabilities supported on the Port (0x0600)
150 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
151 struct ice_aqc_get_phy_caps_data *pcaps,
152 struct ice_sq_cd *cd)
154 struct ice_aqc_get_phy_caps *cmd;
155 u16 pcaps_size = sizeof(*pcaps);
156 struct ice_aq_desc desc;
157 enum ice_status status;
159 cmd = &desc.params.get_phy;
161 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
162 return ICE_ERR_PARAM;
164 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
167 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
169 cmd->param0 |= CPU_TO_LE16(report_mode);
170 status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
172 if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
173 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
174 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
181 * ice_get_media_type - Gets media type
182 * @pi: port information structure
184 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
186 struct ice_link_status *hw_link_info;
189 return ICE_MEDIA_UNKNOWN;
191 hw_link_info = &pi->phy.link_info;
192 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
193 /* If more than one media type is selected, report unknown */
194 return ICE_MEDIA_UNKNOWN;
196 if (hw_link_info->phy_type_low) {
197 switch (hw_link_info->phy_type_low) {
198 case ICE_PHY_TYPE_LOW_1000BASE_SX:
199 case ICE_PHY_TYPE_LOW_1000BASE_LX:
200 case ICE_PHY_TYPE_LOW_10GBASE_SR:
201 case ICE_PHY_TYPE_LOW_10GBASE_LR:
202 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
203 case ICE_PHY_TYPE_LOW_25GBASE_SR:
204 case ICE_PHY_TYPE_LOW_25GBASE_LR:
205 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
206 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
207 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
208 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
209 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
210 case ICE_PHY_TYPE_LOW_50GBASE_SR:
211 case ICE_PHY_TYPE_LOW_50GBASE_FR:
212 case ICE_PHY_TYPE_LOW_50GBASE_LR:
213 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
214 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
215 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
216 case ICE_PHY_TYPE_LOW_100GBASE_DR:
217 return ICE_MEDIA_FIBER;
218 case ICE_PHY_TYPE_LOW_100BASE_TX:
219 case ICE_PHY_TYPE_LOW_1000BASE_T:
220 case ICE_PHY_TYPE_LOW_2500BASE_T:
221 case ICE_PHY_TYPE_LOW_5GBASE_T:
222 case ICE_PHY_TYPE_LOW_10GBASE_T:
223 case ICE_PHY_TYPE_LOW_25GBASE_T:
224 return ICE_MEDIA_BASET;
225 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
226 case ICE_PHY_TYPE_LOW_25GBASE_CR:
227 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
228 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
229 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
230 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
231 case ICE_PHY_TYPE_LOW_50GBASE_CP:
232 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
233 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
234 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
236 case ICE_PHY_TYPE_LOW_1000BASE_KX:
237 case ICE_PHY_TYPE_LOW_2500BASE_KX:
238 case ICE_PHY_TYPE_LOW_2500BASE_X:
239 case ICE_PHY_TYPE_LOW_5GBASE_KR:
240 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
241 case ICE_PHY_TYPE_LOW_25GBASE_KR:
242 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
243 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
244 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
245 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
246 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
247 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
248 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
249 return ICE_MEDIA_BACKPLANE;
252 switch (hw_link_info->phy_type_high) {
253 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
254 return ICE_MEDIA_BACKPLANE;
257 return ICE_MEDIA_UNKNOWN;
261 * ice_aq_get_link_info
262 * @pi: port information structure
263 * @ena_lse: enable/disable LinkStatusEvent reporting
264 * @link: pointer to link status structure - optional
265 * @cd: pointer to command details structure or NULL
267 * Get Link Status (0x607). Returns the link status of the adapter.
270 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
271 struct ice_link_status *link, struct ice_sq_cd *cd)
273 struct ice_link_status *hw_link_info_old, *hw_link_info;
274 struct ice_aqc_get_link_status_data link_data = { 0 };
275 struct ice_aqc_get_link_status *resp;
276 enum ice_media_type *hw_media_type;
277 struct ice_fc_info *hw_fc_info;
278 bool tx_pause, rx_pause;
279 struct ice_aq_desc desc;
280 enum ice_status status;
284 return ICE_ERR_PARAM;
285 hw_link_info_old = &pi->phy.link_info_old;
286 hw_media_type = &pi->phy.media_type;
287 hw_link_info = &pi->phy.link_info;
288 hw_fc_info = &pi->fc;
290 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
291 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
292 resp = &desc.params.get_link_status;
293 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
294 resp->lport_num = pi->lport;
296 status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
299 if (status != ICE_SUCCESS)
302 /* save off old link status information */
303 *hw_link_info_old = *hw_link_info;
305 /* update current link status information */
306 hw_link_info->link_speed = LE16_TO_CPU(link_data.link_speed);
307 hw_link_info->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
308 hw_link_info->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
309 *hw_media_type = ice_get_media_type(pi);
310 hw_link_info->link_info = link_data.link_info;
311 hw_link_info->an_info = link_data.an_info;
312 hw_link_info->ext_info = link_data.ext_info;
313 hw_link_info->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
314 hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
315 hw_link_info->topo_media_conflict = link_data.topo_media_conflict;
316 hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
319 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
320 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
321 if (tx_pause && rx_pause)
322 hw_fc_info->current_mode = ICE_FC_FULL;
324 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
326 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
328 hw_fc_info->current_mode = ICE_FC_NONE;
330 hw_link_info->lse_ena =
331 !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
334 /* save link status information */
336 *link = *hw_link_info;
338 /* flag cleared so calling functions don't call AQ again */
339 pi->phy.get_link_info = false;
345 * ice_init_flex_flags
346 * @hw: pointer to the hardware structure
347 * @prof_id: Rx Descriptor Builder profile ID
349 * Function to initialize Rx flex flags
351 static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
355 /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
356 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
357 * flexiflags1[3:0] - Not used for flag programming
358 * flexiflags2[7:0] - Tunnel and VLAN types
359 * 2 invalid fields in last index
362 /* Rx flex flags are currently programmed for the NIC profiles only.
363 * Different flag bit programming configurations can be added per
366 case ICE_RXDID_FLEX_NIC:
367 case ICE_RXDID_FLEX_NIC_2:
368 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
369 ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
371 /* flex flag 1 is not used for flexi-flag programming, skipping
372 * these four FLG64 bits.
374 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
375 ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
376 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
377 ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
378 ICE_FLG_EVLAN_x9100, idx++);
379 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
380 ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
381 ICE_FLG_TNL0, idx++);
382 ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
383 ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
387 ice_debug(hw, ICE_DBG_INIT,
388 "Flag programming for profile ID %d not supported\n",
395 * @hw: pointer to the hardware structure
396 * @prof_id: Rx Descriptor Builder profile ID
398 * Function to initialize flex descriptors
400 static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
402 enum ice_flex_mdid mdid;
405 case ICE_RXDID_FLEX_NIC:
406 case ICE_RXDID_FLEX_NIC_2:
407 ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_MDID_RX_HASH_LOW, 0);
408 ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_MDID_RX_HASH_HIGH, 1);
409 ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_MDID_FLOW_ID_LOWER, 2);
411 mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
412 ICE_MDID_SRC_VSI : ICE_MDID_FLOW_ID_HIGH;
414 ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
416 ice_init_flex_flags(hw, prof_id);
420 ice_debug(hw, ICE_DBG_INIT,
421 "Field init for profile ID %d not supported\n",
428 * @hw: pointer to the HW struct
429 * @max_frame_size: Maximum Frame Size to be supported
430 * @cd: pointer to command details structure or NULL
432 * Set MAC configuration (0x0603)
435 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
437 u16 fc_threshold_val, tx_timer_val;
438 struct ice_aqc_set_mac_cfg *cmd;
439 struct ice_port_info *pi;
440 struct ice_aq_desc desc;
441 enum ice_status status;
446 cmd = &desc.params.set_mac_cfg;
448 if (max_frame_size == 0)
449 return ICE_ERR_PARAM;
451 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
453 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
455 /* Retrieve the current data_pacing value in FW */
456 pi = &hw->port_info[port_num];
458 /* We turn on the get_link_info so that ice_update_link_info(...)
461 pi->phy.get_link_info = 1;
463 status = ice_get_link_status(pi, &link_up);
468 cmd->params = pi->phy.link_info.pacing;
470 /* We read back the transmit timer and fc threshold value of
471 * LFC. Thus, we will use index =
472 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
474 * Also, because we are operating on transmit timer and fc
475 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
477 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
479 /* Retrieve the transmit timer */
481 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
482 tx_timer_val = reg_val &
483 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
484 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
486 /* Retrieve the fc threshold */
488 PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
489 fc_threshold_val = reg_val & MAKEMASK(0xFFFF, 0);
490 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_threshold_val);
492 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
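/*
 * Illustrative usage sketch (the 9728-byte frame size is an assumption for
 * the example, not a value taken from this file):
 *
 *	enum ice_status status;
 *
 *	status = ice_aq_set_mac_cfg(hw, 9728, NULL);
 *	if (status)
 *		ice_debug(hw, ICE_DBG_INIT, "Failed to set MAC config\n");
 */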
496 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
497 * @hw: pointer to the HW struct
499 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
501 struct ice_switch_info *sw;
503 hw->switch_info = (struct ice_switch_info *)
504 ice_malloc(hw, sizeof(*hw->switch_info));
505 sw = hw->switch_info;
508 return ICE_ERR_NO_MEMORY;
510 INIT_LIST_HEAD(&sw->vsi_list_map_head);
512 return ice_init_def_sw_recp(hw);
516 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
517 * @hw: pointer to the HW struct
519 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
521 struct ice_switch_info *sw = hw->switch_info;
522 struct ice_vsi_list_map_info *v_pos_map;
523 struct ice_vsi_list_map_info *v_tmp_map;
524 struct ice_sw_recipe *recps;
527 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
528 ice_vsi_list_map_info, list_entry) {
529 LIST_DEL(&v_pos_map->list_entry);
530 ice_free(hw, v_pos_map);
532 recps = hw->switch_info->recp_list;
533 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
534 recps[i].root_rid = i;
536 if (recps[i].adv_rule) {
537 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
538 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
540 ice_destroy_lock(&recps[i].filt_rule_lock);
541 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
542 &recps[i].filt_rules,
543 ice_adv_fltr_mgmt_list_entry,
545 LIST_DEL(&lst_itr->list_entry);
546 ice_free(hw, lst_itr->lkups);
547 ice_free(hw, lst_itr);
550 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
552 ice_destroy_lock(&recps[i].filt_rule_lock);
553 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
554 &recps[i].filt_rules,
555 ice_fltr_mgmt_list_entry,
557 LIST_DEL(&lst_itr->list_entry);
558 ice_free(hw, lst_itr);
562 ice_rm_all_sw_replay_rule_info(hw);
563 ice_free(hw, sw->recp_list);
567 #define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
568 (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
569 #define ICE_FW_LOG_DESC_SIZE_MAX \
570 ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
573 * ice_cfg_fw_log - configure FW logging
574 * @hw: pointer to the HW struct
575 * @enable: enable certain FW logging events if true, disable all if false
577 * This function enables/disables the FW logging via Rx CQ events and a UART
578 * port based on predetermined configurations. FW logging via the Rx CQ can be
579 * enabled/disabled for individual PFs. However, FW logging via the UART can
580 * only be enabled/disabled for all PFs on the same device.
582 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
583 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
584 * before initializing the device.
586 * When re/configuring FW logging, callers need to update the "cfg" elements of
587 * the hw->fw_log.evnts array with the desired logging event configurations for
588 * modules of interest. When disabling FW logging completely, the callers can
589 * just pass false in the "enable" parameter. On completion, the function will
590 * update the "cur" element of the hw->fw_log.evnts array with the resulting
591 * logging event configurations of the modules that are being re/configured. FW
592 * logging modules that are not part of a reconfiguration operation retain their
595 * Before resetting the device, it is recommended that the driver disables FW
596 * logging before shutting down the control queue. When disabling FW logging
597 * ("enable" = false), the latest configurations of FW logging events stored in
598 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
601 * When enabling FW logging to emit log messages via the Rx CQ during the
602 * device's initialization phase, a mechanism alternative to interrupt handlers
603 * needs to be used to extract FW log messages from the Rx CQ periodically and
604 * to prevent the Rx CQ from being full and stalling other types of control
605 * messages from FW to SW. Interrupts are typically disabled during the device's
606 * initialization phase.
608 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
610 struct ice_aqc_fw_logging_data *data = NULL;
611 struct ice_aqc_fw_logging *cmd;
612 enum ice_status status = ICE_SUCCESS;
613 u16 i, chgs = 0, len = 0;
614 struct ice_aq_desc desc;
618 if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
621 /* Disable FW logging only when the control queue is still responsive */
623 (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
626 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
627 cmd = &desc.params.fw_logging;
629 /* Indicate which controls are valid */
630 if (hw->fw_log.cq_en)
631 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
633 if (hw->fw_log.uart_en)
634 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
637 /* Fill in an array of entries with FW logging modules and
638 * logging events being reconfigured.
640 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
643 /* Keep track of enabled event types */
644 actv_evnts |= hw->fw_log.evnts[i].cfg;
646 if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
650 data = (struct ice_aqc_fw_logging_data *)
652 ICE_FW_LOG_DESC_SIZE_MAX);
654 return ICE_ERR_NO_MEMORY;
657 val = i << ICE_AQC_FW_LOG_ID_S;
658 val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
659 data->entry[chgs++] = CPU_TO_LE16(val);
662 /* Only enable FW logging if at least one module is specified.
663 * If FW logging is currently enabled but all modules are not
664 * enabled to emit log messages, disable FW logging altogether.
667 /* Leave if there is effectively no change */
671 if (hw->fw_log.cq_en)
672 cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
674 if (hw->fw_log.uart_en)
675 cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
678 len = ICE_FW_LOG_DESC_SIZE(chgs);
679 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
683 status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
685 /* Update the current configuration to reflect events enabled.
686 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
687 * logging mode is enabled for the device. They do not reflect
688 * actual modules being enabled to emit log messages. So, their
689 * values remain unchanged even when all modules are disabled.
691 u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
693 hw->fw_log.actv_evnts = actv_evnts;
694 for (i = 0; i < cnt; i++) {
698 /* When disabling all FW logging events as part
699 * of device's de-initialization, the original
700 * configurations are retained, and can be used
701 * to reconfigure FW logging later if the device
704 hw->fw_log.evnts[i].cur = 0;
708 v = LE16_TO_CPU(data->entry[i]);
709 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
710 hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
723 * @hw: pointer to the HW struct
724 * @desc: pointer to the AQ message descriptor
725 * @buf: pointer to the buffer accompanying the AQ message
727 * Formats a FW Log message and outputs it via the standard driver logs.
729 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
731 ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
732 ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
733 LE16_TO_CPU(desc->datalen));
734 ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
738 * ice_get_itr_intrl_gran - determine int/intrl granularity
739 * @hw: pointer to the HW struct
741 * Determines the itr/intrl granularities based on the maximum aggregate
742 * bandwidth according to the device's configuration during power-on.
744 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
746 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
747 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
748 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
750 switch (max_agg_bw) {
751 case ICE_MAX_AGG_BW_200G:
752 case ICE_MAX_AGG_BW_100G:
753 case ICE_MAX_AGG_BW_50G:
754 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
755 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
757 case ICE_MAX_AGG_BW_25G:
758 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
759 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
765 * ice_init_hw - main hardware initialization routine
766 * @hw: pointer to the hardware structure
768 enum ice_status ice_init_hw(struct ice_hw *hw)
770 struct ice_aqc_get_phy_caps_data *pcaps;
771 enum ice_status status;
775 ice_debug(hw, ICE_DBG_TRACE, "ice_init_hw");
778 /* Set MAC type based on DeviceID */
779 status = ice_set_mac_type(hw);
783 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
784 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
785 PF_FUNC_RID_FUNCTION_NUMBER_S;
788 status = ice_reset(hw, ICE_RESET_PFR);
792 ice_get_itr_intrl_gran(hw);
795 status = ice_init_all_ctrlq(hw);
797 goto err_unroll_cqinit;
799 /* Enable FW logging. Not fatal if this fails. */
800 status = ice_cfg_fw_log(hw, true);
802 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
804 status = ice_clear_pf_cfg(hw);
806 goto err_unroll_cqinit;
808 /* Set bit to enable Flow Director filters */
809 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
810 INIT_LIST_HEAD(&hw->fdir_list_head);
812 ice_clear_pxe_mode(hw);
814 status = ice_init_nvm(hw);
816 goto err_unroll_cqinit;
818 status = ice_get_caps(hw);
820 goto err_unroll_cqinit;
822 hw->port_info = (struct ice_port_info *)
823 ice_malloc(hw, sizeof(*hw->port_info));
824 if (!hw->port_info) {
825 status = ICE_ERR_NO_MEMORY;
826 goto err_unroll_cqinit;
829 /* set the back pointer to HW */
830 hw->port_info->hw = hw;
832 /* Initialize port_info struct with switch configuration data */
833 status = ice_get_initial_sw_cfg(hw);
835 goto err_unroll_alloc;
839 /* Query the allocated resources for Tx scheduler */
840 status = ice_sched_query_res_alloc(hw);
842 ice_debug(hw, ICE_DBG_SCHED,
843 "Failed to get scheduler allocated resources\n");
844 goto err_unroll_alloc;
848 /* Initialize port_info struct with scheduler data */
849 status = ice_sched_init_port(hw->port_info);
851 goto err_unroll_sched;
853 pcaps = (struct ice_aqc_get_phy_caps_data *)
854 ice_malloc(hw, sizeof(*pcaps));
856 status = ICE_ERR_NO_MEMORY;
857 goto err_unroll_sched;
860 /* Initialize port_info struct with PHY capabilities */
861 status = ice_aq_get_phy_caps(hw->port_info, false,
862 ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
865 goto err_unroll_sched;
867 /* Initialize port_info struct with link information */
868 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
870 goto err_unroll_sched;
871 /* need a valid SW entry point to build a Tx tree */
872 if (!hw->sw_entry_point_layer) {
873 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
874 status = ICE_ERR_CFG;
875 goto err_unroll_sched;
877 INIT_LIST_HEAD(&hw->agg_list);
878 /* Initialize max burst size */
879 if (!hw->max_burst_size)
880 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
882 status = ice_init_fltr_mgmt_struct(hw);
884 goto err_unroll_sched;
887 /* Get MAC information */
888 /* A single port can report up to two (LAN and WoL) addresses */
889 mac_buf = ice_calloc(hw, 2,
890 sizeof(struct ice_aqc_manage_mac_read_resp));
891 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
894 status = ICE_ERR_NO_MEMORY;
895 goto err_unroll_fltr_mgmt_struct;
898 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
899 ice_free(hw, mac_buf);
902 goto err_unroll_fltr_mgmt_struct;
904 ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
905 ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
907 /* Obtain the counter base index to be used by the flow director */
908 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
910 goto err_unroll_fltr_mgmt_struct;
914 err_unroll_fltr_mgmt_struct:
915 ice_cleanup_fltr_mgmt_struct(hw);
917 ice_sched_cleanup_all(hw);
919 ice_free(hw, hw->port_info);
920 hw->port_info = NULL;
922 ice_shutdown_all_ctrlq(hw);
927 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
928 * @hw: pointer to the hardware structure
930 * This should be called only during nominal operation, not as a result of
931 * ice_init_hw() failing, since ice_init_hw() will take care of unrolling
932 * applicable initializations if it fails for any reason.
934 void ice_deinit_hw(struct ice_hw *hw)
936 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
937 ice_cleanup_fltr_mgmt_struct(hw);
939 ice_sched_cleanup_all(hw);
940 ice_sched_clear_agg(hw);
944 ice_free(hw, hw->port_info);
945 hw->port_info = NULL;
948 /* Attempt to disable FW logging before shutting down control queues */
949 ice_cfg_fw_log(hw, false);
950 ice_shutdown_all_ctrlq(hw);
952 /* Clear VSI contexts if not already cleared */
953 ice_clear_all_vsi_ctx(hw);
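/*
 * Illustrative sketch of the expected pairing with ice_init_hw() (the caller
 * flow below is an assumption, e.g. a PF probe/remove path):
 *
 *	if (ice_init_hw(hw))
 *		return;			failure path unrolls itself
 *	...nominal operation...
 *	ice_deinit_hw(hw);		nominal teardown only
 */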
957 * ice_check_reset - Check to see if a global reset is complete
958 * @hw: pointer to the hardware structure
960 enum ice_status ice_check_reset(struct ice_hw *hw)
962 u32 cnt, reg = 0, grst_delay;
964 /* Poll for Device Active state in case a recent CORER, GLOBR,
965 * or EMPR has occurred. The grst delay value is in 100ms units.
966 * Add 1sec for outstanding AQ commands that can take a long time.
968 #define GLGEN_RSTCTL 0x000B8180 /* Reset Source: POR */
969 #define GLGEN_RSTCTL_GRSTDEL_S 0
970 #define GLGEN_RSTCTL_GRSTDEL_M MAKEMASK(0x3F, GLGEN_RSTCTL_GRSTDEL_S)
971 grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
972 GLGEN_RSTCTL_GRSTDEL_S) + 10;
974 for (cnt = 0; cnt < grst_delay; cnt++) {
975 ice_msec_delay(100, true);
976 reg = rd32(hw, GLGEN_RSTAT);
977 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
981 if (cnt == grst_delay) {
982 ice_debug(hw, ICE_DBG_INIT,
983 "Global reset polling failed to complete.\n");
984 return ICE_ERR_RESET_FAILED;
987 #define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
988 GLNVM_ULD_GLOBR_DONE_M)
990 /* Device is Active; check Global Reset processes are done */
991 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
992 reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
993 if (reg == ICE_RESET_DONE_MASK) {
994 ice_debug(hw, ICE_DBG_INIT,
995 "Global reset processes done. %d\n", cnt);
998 ice_msec_delay(10, true);
1001 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1002 ice_debug(hw, ICE_DBG_INIT,
1003 "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1005 return ICE_ERR_RESET_FAILED;
1012 * ice_pf_reset - Reset the PF
1013 * @hw: pointer to the hardware structure
1015 * If a global reset has been triggered, this function checks
1016 * for its completion and then issues the PF reset.
1018 static enum ice_status ice_pf_reset(struct ice_hw *hw)
1022 /* If at function entry a global reset was already in progress, i.e.
1023 * state is not 'device active' or any of the reset done bits are not
1024 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1025 * global reset is done.
1027 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1028 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1029 /* poll on global reset currently in progress until done */
1030 if (ice_check_reset(hw))
1031 return ICE_ERR_RESET_FAILED;
1037 reg = rd32(hw, PFGEN_CTRL);
1039 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1041 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1042 reg = rd32(hw, PFGEN_CTRL);
1043 if (!(reg & PFGEN_CTRL_PFSWR_M))
1046 ice_msec_delay(1, true);
1049 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1050 ice_debug(hw, ICE_DBG_INIT,
1051 "PF reset polling failed to complete.\n");
1052 return ICE_ERR_RESET_FAILED;
1059 * ice_reset - Perform different types of reset
1060 * @hw: pointer to the hardware structure
1061 * @req: reset request
1063 * This function triggers a reset as specified by the req parameter.
1066 * If anything other than a PF reset is triggered, PXE mode is restored.
1067 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1068 * interface has been restored in the rebuild flow.
1070 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1076 return ice_pf_reset(hw);
1077 case ICE_RESET_CORER:
1078 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1079 val = GLGEN_RTRIG_CORER_M;
1081 case ICE_RESET_GLOBR:
1082 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1083 val = GLGEN_RTRIG_GLOBR_M;
1086 return ICE_ERR_PARAM;
1089 val |= rd32(hw, GLGEN_RTRIG);
1090 wr32(hw, GLGEN_RTRIG, val);
1094 /* wait for the FW to be ready */
1095 return ice_check_reset(hw);
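/*
 * Illustrative usage sketch: requesting a PF reset, as done from
 * ice_init_hw() above:
 *
 *	status = ice_reset(hw, ICE_RESET_PFR);
 *	if (status)
 *		return status;
 */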
1101 * ice_copy_rxq_ctx_to_hw
1102 * @hw: pointer to the hardware structure
1103 * @ice_rxq_ctx: pointer to the rxq context
1104 * @rxq_index: the index of the Rx queue
1106 * Copies rxq context from dense structure to HW register space
1108 static enum ice_status
1109 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1114 return ICE_ERR_BAD_PTR;
1116 if (rxq_index > QRX_CTRL_MAX_INDEX)
1117 return ICE_ERR_PARAM;
1119 /* Copy each dword separately to HW */
1120 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1121 wr32(hw, QRX_CONTEXT(i, rxq_index),
1122 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1124 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1125 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1131 /* LAN Rx Queue Context */
1132 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1133 /* Field Width LSB */
1134 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1135 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1136 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1137 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1138 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1139 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1140 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1141 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1142 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1143 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1144 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1145 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1146 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1147 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1148 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1149 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1150 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1151 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1152 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1158 * @hw: pointer to the hardware structure
1159 * @rlan_ctx: pointer to the rxq context
1160 * @rxq_index: the index of the Rx queue
1162 * Converts rxq context from sparse to dense structure and then writes
1163 * it to HW register space
1166 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1169 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1171 ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1172 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
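/*
 * Illustrative usage sketch (all values are assumptions for the example):
 * writing a minimal Rx queue context for queue 0 using the fields declared
 * in ice_rlan_ctx_info above. The base address and buffer size are assumed
 * to be programmed in 128-byte units per the hardware context layout.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = rxq_dma_addr >> 7;
 *	rlan_ctx.qlen = 512;
 *	rlan_ctx.dbuf = 2048 >> 7;
 *	ice_write_rxq_ctx(hw, &rlan_ctx, 0);
 */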
1175 #if !defined(NO_UNUSED_CTX_CODE) || defined(AE_DRIVER)
1178 * @hw: pointer to the hardware structure
1179 * @rxq_index: the index of the Rx queue to clear
1181 * Clears rxq context in HW register space
1183 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1187 if (rxq_index > QRX_CTRL_MAX_INDEX)
1188 return ICE_ERR_PARAM;
1190 /* Clear each dword register separately */
1191 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1192 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1196 #endif /* !NO_UNUSED_CTX_CODE || AE_DRIVER */
1198 /* LAN Tx Queue Context */
1199 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1200 /* Field Width LSB */
1201 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1202 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1203 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1204 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1205 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1206 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1207 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1208 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1209 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1210 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1211 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1212 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1213 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1214 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1215 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1216 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1217 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1218 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1219 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1220 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1221 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1222 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1223 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1224 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1225 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1226 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1227 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 110, 171),
1231 #if !defined(NO_UNUSED_CTX_CODE) || defined(AE_DRIVER)
1233 * ice_copy_tx_cmpltnq_ctx_to_hw
1234 * @hw: pointer to the hardware structure
1235 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1236 * @tx_cmpltnq_index: the index of the completion queue
1238 * Copies Tx completion queue context from dense structure to HW register space
1240 static enum ice_status
1241 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1242 u32 tx_cmpltnq_index)
1246 if (!ice_tx_cmpltnq_ctx)
1247 return ICE_ERR_BAD_PTR;
1249 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1250 return ICE_ERR_PARAM;
1252 /* Copy each dword separately to HW */
1253 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1254 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1255 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1257 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1258 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1264 /* LAN Tx Completion Queue Context */
1265 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1266 /* Field Width LSB */
1267 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1268 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1269 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1270 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1271 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1272 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1273 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1274 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1275 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1276 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1281 * ice_write_tx_cmpltnq_ctx
1282 * @hw: pointer to the hardware structure
1283 * @tx_cmpltnq_ctx: pointer to the completion queue context
1284 * @tx_cmpltnq_index: the index of the completion queue
1286 * Converts completion queue context from sparse to dense structure and then
1287 * writes it to HW register space
1290 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1291 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1292 u32 tx_cmpltnq_index)
1294 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1296 ice_set_ctx((u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1297 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1301 * ice_clear_tx_cmpltnq_ctx
1302 * @hw: pointer to the hardware structure
1303 * @tx_cmpltnq_index: the index of the completion queue to clear
1305 * Clears Tx completion queue context in HW register space
1308 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1312 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1313 return ICE_ERR_PARAM;
1315 /* Clear each dword register separately */
1316 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1317 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1323 * ice_copy_tx_drbell_q_ctx_to_hw
1324 * @hw: pointer to the hardware structure
1325 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1326 * @tx_drbell_q_index: the index of the doorbell queue
1328 * Copies doorbell queue context from dense structure to HW register space
1330 static enum ice_status
1331 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1332 u32 tx_drbell_q_index)
1336 if (!ice_tx_drbell_q_ctx)
1337 return ICE_ERR_BAD_PTR;
1339 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1340 return ICE_ERR_PARAM;
1342 /* Copy each dword separately to HW */
1343 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1344 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1345 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1347 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1348 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1354 /* LAN Tx Doorbell Queue Context info */
1355 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1356 /* Field Width LSB */
1357 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1358 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1359 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1360 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1361 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1362 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1363 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1364 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1365 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1366 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1367 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1372 * ice_write_tx_drbell_q_ctx
1373 * @hw: pointer to the hardware structure
1374 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1375 * @tx_drbell_q_index: the index of the doorbell queue
1377 * Converts doorbell queue context from sparse to dense structure and then
1378 * writes it to HW register space
1381 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1382 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1383 u32 tx_drbell_q_index)
1385 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1387 ice_set_ctx((u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info);
1388 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1392 * ice_clear_tx_drbell_q_ctx
1393 * @hw: pointer to the hardware structure
1394 * @tx_drbell_q_index: the index of the doorbell queue to clear
1396 * Clears doorbell queue context in HW register space
1399 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1403 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1404 return ICE_ERR_PARAM;
1406 /* Clear each dword register separately */
1407 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1408 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1412 #endif /* !NO_UNUSED_CTX_CODE || AE_DRIVER */
1416 * @hw: pointer to the hardware structure
1418 * @desc: pointer to control queue descriptor
1419 * @buf: pointer to command buffer
1420 * @buf_len: max length of buf
1422 * Dumps a debug log of the control command, including its descriptor contents.
1425 ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len)
1427 struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
1430 if (!(mask & hw->debug_mask))
1436 len = LE16_TO_CPU(cq_desc->datalen);
1439 "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
1440 LE16_TO_CPU(cq_desc->opcode),
1441 LE16_TO_CPU(cq_desc->flags),
1442 LE16_TO_CPU(cq_desc->datalen), LE16_TO_CPU(cq_desc->retval));
1443 ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
1444 LE32_TO_CPU(cq_desc->cookie_high),
1445 LE32_TO_CPU(cq_desc->cookie_low));
1446 ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
1447 LE32_TO_CPU(cq_desc->params.generic.param0),
1448 LE32_TO_CPU(cq_desc->params.generic.param1));
1449 ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
1450 LE32_TO_CPU(cq_desc->params.generic.addr_high),
1451 LE32_TO_CPU(cq_desc->params.generic.addr_low));
1452 if (buf && cq_desc->datalen != 0) {
1453 ice_debug(hw, mask, "Buffer:\n");
1457 ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
1462 /* FW Admin Queue command wrappers */
1465 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1466 * @hw: pointer to the HW struct
1467 * @desc: descriptor describing the command
1468 * @buf: buffer to use for indirect commands (NULL for direct commands)
1469 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1470 * @cd: pointer to command details structure
1472 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1475 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1476 u16 buf_size, struct ice_sq_cd *cd)
1478 return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
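/*
 * Illustrative sketch of the direct-command pattern used throughout this
 * file (compare ice_clear_pf_cfg() above): fill a default descriptor for the
 * opcode and send it with no indirect buffer.
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
 *	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */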
1483 * @hw: pointer to the HW struct
1484 * @cd: pointer to command details structure or NULL
1486 * Get the firmware version (0x0001) from the admin queue commands
1488 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1490 struct ice_aqc_get_ver *resp;
1491 struct ice_aq_desc desc;
1492 enum ice_status status;
1494 resp = &desc.params.get_ver;
1496 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1498 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1501 hw->fw_branch = resp->fw_branch;
1502 hw->fw_maj_ver = resp->fw_major;
1503 hw->fw_min_ver = resp->fw_minor;
1504 hw->fw_patch = resp->fw_patch;
1505 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1506 hw->api_branch = resp->api_branch;
1507 hw->api_maj_ver = resp->api_major;
1508 hw->api_min_ver = resp->api_minor;
1509 hw->api_patch = resp->api_patch;
1516 * ice_aq_send_driver_ver
1517 * @hw: pointer to the HW struct
1518 * @dv: driver's major, minor version
1519 * @cd: pointer to command details structure or NULL
1521 * Send the driver version (0x0002) to the firmware
1524 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1525 struct ice_sq_cd *cd)
1527 struct ice_aqc_driver_ver *cmd;
1528 struct ice_aq_desc desc;
1531 cmd = &desc.params.driver_ver;
1534 return ICE_ERR_PARAM;
1536 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1538 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1539 cmd->major_ver = dv->major_ver;
1540 cmd->minor_ver = dv->minor_ver;
1541 cmd->build_ver = dv->build_ver;
1542 cmd->subbuild_ver = dv->subbuild_ver;
1545 while (len < sizeof(dv->driver_string) &&
1546 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1549 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
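/*
 * Illustrative usage sketch (the version numbers are assumptions): reporting
 * the driver version once the admin queue is up. dv.driver_string is assumed
 * to already hold a NUL-terminated ASCII description.
 *
 *	struct ice_driver_ver dv = { 0 };
 *
 *	dv.major_ver = 1;
 *	dv.minor_ver = 0;
 *	dv.build_ver = 0;
 *	dv.subbuild_ver = 0;
 *	ice_aq_send_driver_ver(hw, &dv, NULL);
 */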
1554 * @hw: pointer to the HW struct
1555 * @unloading: is the driver unloading itself
1557 * Tell the Firmware that we're shutting down the AdminQ and whether
1558 * or not the driver is unloading as well (0x0003).
1560 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1562 struct ice_aqc_q_shutdown *cmd;
1563 struct ice_aq_desc desc;
1565 cmd = &desc.params.q_shutdown;
1567 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1570 cmd->driver_unloading = CPU_TO_LE32(ICE_AQC_DRIVER_UNLOADING);
1572 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1577 * @hw: pointer to the HW struct
1579 * @access: access type
1580 * @sdp_number: resource number
1581 * @timeout: the maximum time in ms that the driver may hold the resource
1582 * @cd: pointer to command details structure or NULL
1584 * Requests common resource using the admin queue commands (0x0008).
1585 * When attempting to acquire the Global Config Lock, the driver can
1586 * learn of three states:
1587 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1588 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1589 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1590 * successfully downloaded the package; the driver does
1591 * not have to download the package and can continue
1594 * Note that if the caller is in an acquire-lock, perform-action, release-lock
1595 * phase of operation, it is possible that the FW may detect a timeout and issue
1596 * a CORER. In this case, the driver will receive a CORER interrupt and will
1597 * have to determine its cause. The calling thread that is handling this flow
1598 * will likely get an error propagated back to it indicating the Download
1599 * Package, Update Package or the Release Resource AQ commands timed out.
1601 static enum ice_status
1602 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1603 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1604 struct ice_sq_cd *cd)
1606 struct ice_aqc_req_res *cmd_resp;
1607 struct ice_aq_desc desc;
1608 enum ice_status status;
1610 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_req_res");
1612 cmd_resp = &desc.params.res_owner;
1614 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1616 cmd_resp->res_id = CPU_TO_LE16(res);
1617 cmd_resp->access_type = CPU_TO_LE16(access);
1618 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1619 cmd_resp->timeout = CPU_TO_LE32(*timeout);
1622 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1624 /* The completion specifies, in its Timeout field, the maximum time in ms
1625 * that the driver may hold the resource.
1628 /* Global config lock response utilizes an additional status field.
1630 * If the Global config lock resource is held by some other driver, the
1631 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1632 * and the timeout field indicates the maximum time the current owner
1633 * of the resource has to free it.
1635 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1636 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1637 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1639 } else if (LE16_TO_CPU(cmd_resp->status) ==
1640 ICE_AQ_RES_GLBL_IN_PROG) {
1641 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1642 return ICE_ERR_AQ_ERROR;
1643 } else if (LE16_TO_CPU(cmd_resp->status) ==
1644 ICE_AQ_RES_GLBL_DONE) {
1645 return ICE_ERR_AQ_NO_WORK;
1648 /* invalid FW response, force a timeout immediately */
1650 return ICE_ERR_AQ_ERROR;
1653 /* If the resource is held by some other driver, the command completes
1654 * with a busy return value and the timeout field indicates the maximum
1655 * time the current owner of the resource has to free it.
1657 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1658 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1664 * ice_aq_release_res
1665 * @hw: pointer to the HW struct
1667 * @sdp_number: resource number
1668 * @cd: pointer to command details structure or NULL
1670 * release common resource using the admin queue commands (0x0009)
1672 static enum ice_status
1673 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1674 struct ice_sq_cd *cd)
1676 struct ice_aqc_req_res *cmd;
1677 struct ice_aq_desc desc;
1679 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_release_res");
1681 cmd = &desc.params.res_owner;
1683 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1685 cmd->res_id = CPU_TO_LE16(res);
1686 cmd->res_number = CPU_TO_LE32(sdp_number);
1688 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1693 * @hw: pointer to the HW structure
1695 * @access: access type (read or write)
1696 * @timeout: timeout in milliseconds
1698 * This function will attempt to acquire the ownership of a resource.
1701 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1702 enum ice_aq_res_access_type access, u32 timeout)
1704 #define ICE_RES_POLLING_DELAY_MS 10
1705 u32 delay = ICE_RES_POLLING_DELAY_MS;
1706 u32 time_left = timeout;
1707 enum ice_status status;
1709 ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_res");
1711 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1713 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1714 * previously acquired the resource and performed any necessary updates;
1715 * in this case the caller does not obtain the resource and has no
1716 * further work to do.
1718 if (status == ICE_ERR_AQ_NO_WORK)
1719 goto ice_acquire_res_exit;
1722 ice_debug(hw, ICE_DBG_RES,
1723 "resource %d acquire type %d failed.\n", res, access);
1725 /* If necessary, poll until the current lock owner times out */
1726 timeout = time_left;
1727 while (status && timeout && time_left) {
1728 ice_msec_delay(delay, true);
1729 timeout = (timeout > delay) ? timeout - delay : 0;
1730 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1732 if (status == ICE_ERR_AQ_NO_WORK)
1733 /* lock free, but no work to do */
1740 if (status && status != ICE_ERR_AQ_NO_WORK)
1741 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1743 ice_acquire_res_exit:
1744 if (status == ICE_ERR_AQ_NO_WORK) {
1745 if (access == ICE_RES_WRITE)
1746 ice_debug(hw, ICE_DBG_RES,
1747 "resource indicates no work to do.\n");
1749 ice_debug(hw, ICE_DBG_RES,
1750 "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
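/*
 * Illustrative sketch (ICE_GLOBAL_CFG_LOCK_TIMEOUT is an assumed timeout
 * constant): acquiring and releasing the Global Config Lock around a package
 * download, handling the "no work to do" case described above.
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
 *				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		return ICE_SUCCESS;	package already downloaded elsewhere
 *	if (status)
 *		return status;
 *	...download package...
 *	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 */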
1757 * @hw: pointer to the HW structure
1760 * This function will release a resource using the proper Admin Command.
1762 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1764 enum ice_status status;
1765 u32 total_delay = 0;
1767 ice_debug(hw, ICE_DBG_TRACE, "ice_release_res");
1769 status = ice_aq_release_res(hw, res, 0, NULL);
1771 /* there are some rare cases when trying to release the resource
1772 * results in an admin queue timeout, so handle them correctly
1774 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1775 (total_delay < hw->adminq.sq_cmd_timeout)) {
1776 ice_msec_delay(1, true);
1777 status = ice_aq_release_res(hw, res, 0, NULL);
1783 * ice_aq_alloc_free_res - command to allocate/free resources
1784 * @hw: pointer to the HW struct
1785 * @num_entries: number of resource entries in buffer
1786 * @buf: Indirect buffer to hold data parameters and response
1787 * @buf_size: size of buffer for indirect commands
1788 * @opc: pass in the command opcode
1789 * @cd: pointer to command details structure or NULL
1791 * Helper function to allocate/free resources using the admin queue commands
1794 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1795 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1796 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1798 struct ice_aqc_alloc_free_res_cmd *cmd;
1799 struct ice_aq_desc desc;
1801 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_alloc_free_res");
1803 cmd = &desc.params.sw_res_ctrl;
1806 return ICE_ERR_PARAM;
1808 if (buf_size < (num_entries * sizeof(buf->elem[0])))
1809 return ICE_ERR_PARAM;
1811 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1813 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1815 cmd->num_entries = CPU_TO_LE16(num_entries);
1817 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1821 * ice_alloc_hw_res - allocate resource
1822 * @hw: pointer to the HW struct
1823 * @type: type of resource
1824 * @num: number of resources to allocate
1825 * @btm: allocate from bottom
1826 * @res: pointer to array that will receive the resources
1829 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1831 struct ice_aqc_alloc_free_res_elem *buf;
1832 enum ice_status status;
1835 buf_len = sizeof(*buf) + sizeof(buf->elem) * (num - 1);
1836 buf = (struct ice_aqc_alloc_free_res_elem *)
1837 ice_malloc(hw, buf_len);
1839 return ICE_ERR_NO_MEMORY;
1841 /* Prepare buffer to allocate resource. */
1842 buf->num_elems = CPU_TO_LE16(num);
1843 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1844 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1846 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1848 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1849 ice_aqc_opc_alloc_res, NULL);
1851 goto ice_alloc_res_exit;
1853 ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
1854 ICE_NONDMA_TO_NONDMA);
1862 * ice_free_hw_res - free allocated HW resource
1863 * @hw: pointer to the HW struct
1864 * @type: type of resource to free
1865 * @num: number of resources
1866 * @res: pointer to array that contains the resources to free
1869 ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1871 struct ice_aqc_alloc_free_res_elem *buf;
1872 enum ice_status status;
1875 buf_len = sizeof(*buf) + sizeof(buf->elem) * (num - 1);
1876 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1878 return ICE_ERR_NO_MEMORY;
1880 /* Prepare buffer to free resource. */
1881 buf->num_elems = CPU_TO_LE16(num);
1882 buf->res_type = CPU_TO_LE16(type);
1883 ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
1884 ICE_NONDMA_TO_NONDMA);
1886 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1887 ice_aqc_opc_free_res, NULL);
1889 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1896 * ice_get_num_per_func - determine number of resources per PF
1897 * @hw: pointer to the HW structure
1898 * @max: value to be evenly split between each PF
1900 * Determine the number of valid functions by going through the bitmap returned
1901 * from parsing capabilities and use this to calculate the number of resources
1902 * per PF based on the max value passed in.
1904 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1908 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1909 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
1910 ICE_CAPS_VALID_FUNCS_M);
1919 * ice_parse_caps - parse function/device capabilities
1920 * @hw: pointer to the HW struct
1921 * @buf: pointer to a buffer containing function/device capability records
1922 * @cap_count: number of capability records in the list
1923 * @opc: type of capabilities list to parse
1925 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1928 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1929 enum ice_adminq_opc opc)
1931 struct ice_aqc_list_caps_elem *cap_resp;
1932 struct ice_hw_func_caps *func_p = NULL;
1933 struct ice_hw_dev_caps *dev_p = NULL;
1934 struct ice_hw_common_caps *caps;
1940 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1942 if (opc == ice_aqc_opc_list_dev_caps) {
1943 dev_p = &hw->dev_caps;
1944 caps = &dev_p->common_cap;
1945 } else if (opc == ice_aqc_opc_list_func_caps) {
1946 func_p = &hw->func_caps;
1947 caps = &func_p->common_cap;
1949 ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1953 for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1954 u32 logical_id = LE32_TO_CPU(cap_resp->logical_id);
1955 u32 phys_id = LE32_TO_CPU(cap_resp->phys_id);
1956 u32 number = LE32_TO_CPU(cap_resp->number);
1957 u16 cap = LE16_TO_CPU(cap_resp->cap);
1960 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1961 caps->valid_functions = number;
1962 ice_debug(hw, ICE_DBG_INIT,
1963 "HW caps: Valid Functions = %d\n",
1964 caps->valid_functions);
1966 case ICE_AQC_CAPS_VSI:
1968 dev_p->num_vsi_allocd_to_host = number;
1969 ice_debug(hw, ICE_DBG_INIT,
1970 "HW caps: Dev.VSI cnt = %d\n",
1971 dev_p->num_vsi_allocd_to_host);
1972 } else if (func_p) {
1973 func_p->guar_num_vsi =
1974 ice_get_num_per_func(hw, ICE_MAX_VSI);
1975 ice_debug(hw, ICE_DBG_INIT,
1976 "HW caps: Func.VSI cnt = %d\n",
1980 case ICE_AQC_CAPS_DCB:
1981 caps->dcb = (number == 1);
1982 caps->active_tc_bitmap = logical_id;
1983 caps->maxtc = phys_id;
1984 ice_debug(hw, ICE_DBG_INIT,
1985 "HW caps: DCB = %d\n", caps->dcb);
1986 ice_debug(hw, ICE_DBG_INIT,
1987 "HW caps: Active TC bitmap = %d\n",
1988 caps->active_tc_bitmap);
1989 ice_debug(hw, ICE_DBG_INIT,
1990 "HW caps: TC Max = %d\n", caps->maxtc);
1992 case ICE_AQC_CAPS_RSS:
1993 caps->rss_table_size = number;
1994 caps->rss_table_entry_width = logical_id;
1995 ice_debug(hw, ICE_DBG_INIT,
1996 "HW caps: RSS table size = %d\n",
1997 caps->rss_table_size);
1998 ice_debug(hw, ICE_DBG_INIT,
1999 "HW caps: RSS table width = %d\n",
2000 caps->rss_table_entry_width);
2002 case ICE_AQC_CAPS_RXQS:
2003 caps->num_rxq = number;
2004 caps->rxq_first_id = phys_id;
2005 ice_debug(hw, ICE_DBG_INIT,
2006 "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
2007 ice_debug(hw, ICE_DBG_INIT,
2008 "HW caps: Rx first queue ID = %d\n",
2009 caps->rxq_first_id);
2011 case ICE_AQC_CAPS_TXQS:
2012 caps->num_txq = number;
2013 caps->txq_first_id = phys_id;
2014 ice_debug(hw, ICE_DBG_INIT,
2015 "HW caps: Num Tx Qs = %d\n", caps->num_txq);
2016 ice_debug(hw, ICE_DBG_INIT,
2017 "HW caps: Tx first queue ID = %d\n",
2018 caps->txq_first_id);
2020 case ICE_AQC_CAPS_MSIX:
2021 caps->num_msix_vectors = number;
2022 caps->msix_vector_first_id = phys_id;
2023 ice_debug(hw, ICE_DBG_INIT,
2024 "HW caps: MSIX vector count = %d\n",
2025 caps->num_msix_vectors);
2026 ice_debug(hw, ICE_DBG_INIT,
2027 "HW caps: MSIX first vector index = %d\n",
2028 caps->msix_vector_first_id);
2030 case ICE_AQC_CAPS_FD:
2035 dev_p->num_flow_director_fltr = number;
2036 ice_debug(hw, ICE_DBG_INIT,
2037 "HW caps: Dev.fd_fltr =%d\n",
2038 dev_p->num_flow_director_fltr);
2041 reg_val = rd32(hw, GLQF_FD_SIZE);
2042 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2043 GLQF_FD_SIZE_FD_GSIZE_S;
2044 func_p->fd_fltr_guar =
2045 ice_get_num_per_func(hw, val);
2046 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2047 GLQF_FD_SIZE_FD_BSIZE_S;
2048 func_p->fd_fltr_best_effort = val;
2049 ice_debug(hw, ICE_DBG_INIT,
2050 "HW:func.fd_fltr guar= %d\n",
2051 func_p->fd_fltr_guar);
2052 ice_debug(hw, ICE_DBG_INIT,
2053 "HW:func.fd_fltr best effort=%d\n",
2054 func_p->fd_fltr_best_effort);
2058 case ICE_AQC_CAPS_MAX_MTU:
2059 caps->max_mtu = number;
2061 ice_debug(hw, ICE_DBG_INIT,
2062 "HW caps: Dev.MaxMTU = %d\n",
2065 ice_debug(hw, ICE_DBG_INIT,
2066 "HW caps: func.MaxMTU = %d\n",
2070 ice_debug(hw, ICE_DBG_INIT,
2071 "HW caps: Unknown capability[%d]: 0x%x\n", i,
2079 * ice_aq_discover_caps - query function/device capabilities
2080 * @hw: pointer to the HW struct
2081 * @buf: a virtual buffer to hold the capabilities
2082 * @buf_size: Size of the virtual buffer
2083 * @cap_count: set to the required capability count if the AQ returns ENOMEM
2084 * @opc: capabilities type to discover - pass in the command opcode
2085 * @cd: pointer to command details structure or NULL
2087 * Get the function(0x000a)/device(0x000b) capabilities description from
 * firmware.
2090 static enum ice_status
2091 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2092 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2094 struct ice_aqc_list_caps *cmd;
2095 struct ice_aq_desc desc;
2096 enum ice_status status;
2098 cmd = &desc.params.get_cap;
2100 if (opc != ice_aqc_opc_list_func_caps &&
2101 opc != ice_aqc_opc_list_dev_caps)
2102 return ICE_ERR_PARAM;
2104 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2106 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2108 ice_parse_caps(hw, buf, LE32_TO_CPU(cmd->count), opc);
2109 else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
2110 *cap_count = LE32_TO_CPU(cmd->count);
2115 * ice_discover_caps - get info about the HW
2116 * @hw: pointer to the hardware structure
2117 * @opc: capabilities type to discover - pass in the command opcode
2119 static enum ice_status
2120 ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
2122 enum ice_status status;
2127 /* The driver doesn't know how many capabilities the device will return
2128 * so the buffer size required isn't known ahead of time. The driver
2129 * starts with cbuf_len and if this turns out to be insufficient, the
2130 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
2131 * The driver then allocates the buffer based on the count and retries
2132 * the operation. So it follows that the retry count is 2.
2134 #define ICE_GET_CAP_BUF_COUNT 40
2135 #define ICE_GET_CAP_RETRY_COUNT 2
2137 cap_count = ICE_GET_CAP_BUF_COUNT;
2138 retries = ICE_GET_CAP_RETRY_COUNT;
2143 cbuf_len = (u16)(cap_count *
2144 sizeof(struct ice_aqc_list_caps_elem));
2145 cbuf = ice_malloc(hw, cbuf_len);
2147 return ICE_ERR_NO_MEMORY;
2149 status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
2153 if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
2156 /* If ENOMEM is returned, try again with bigger buffer */
2157 } while (--retries);
2163 * ice_get_caps - get info about the HW
2164 * @hw: pointer to the hardware structure
2166 enum ice_status ice_get_caps(struct ice_hw *hw)
2168 enum ice_status status;
2170 status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
2172 status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
2178 * ice_aq_manage_mac_write - manage MAC address write command
2179 * @hw: pointer to the HW struct
2180 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2181 * @flags: flags to control write behavior
2182 * @cd: pointer to command details structure or NULL
2184 * This function is used to write MAC address to the NVM (0x0108).
2187 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2188 struct ice_sq_cd *cd)
2190 struct ice_aqc_manage_mac_write *cmd;
2191 struct ice_aq_desc desc;
2193 cmd = &desc.params.mac_write;
2194 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2199 /* Prep values for flags, sah, sal */
2200 cmd->sah = HTONS(*((const u16 *)mac_addr));
2201 cmd->sal = HTONL(*((const u32 *)(mac_addr + 2)));
2203 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
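/* Usage sketch (illustrative only; "flags" is assumed to be built from the
 * valid manage-MAC-write flag definitions and "new_mac" to hold a six byte
 * address):
 *
 *    u8 new_mac[6] = { 0x02, 0x00, 0x00, 0x11, 0x22, 0x33 };
 *
 *    status = ice_aq_manage_mac_write(hw, new_mac, flags, NULL);
 */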
2207 * ice_aq_clear_pxe_mode
2208 * @hw: pointer to the HW struct
2210 * Tell the firmware that the driver is taking over from PXE (0x0110).
2212 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2214 struct ice_aq_desc desc;
2216 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2217 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2219 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2223 * ice_clear_pxe_mode - clear pxe operations mode
2224 * @hw: pointer to the HW struct
2226 * Make sure all PXE mode settings are cleared, including things
2227 * like descriptor fetch/write-back mode.
2229 void ice_clear_pxe_mode(struct ice_hw *hw)
2231 if (ice_check_sq_alive(hw, &hw->adminq))
2232 ice_aq_clear_pxe_mode(hw);
2237 * ice_get_link_speed_based_on_phy_type - returns link speed
2238 * @phy_type_low: lower part of phy_type
2239 * @phy_type_high: higher part of phy_type
2241 * This helper function will convert an entry in PHY type structure
2242 * [phy_type_low, phy_type_high] to its corresponding link speed.
2243 * Note: In the structure of [phy_type_low, phy_type_high], there should
2244 * be one bit set, as this function will convert one PHY type to its
 * speed.
2246 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2247 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2250 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2252 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2253 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2255 switch (phy_type_low) {
2256 case ICE_PHY_TYPE_LOW_100BASE_TX:
2257 case ICE_PHY_TYPE_LOW_100M_SGMII:
2258 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2260 case ICE_PHY_TYPE_LOW_1000BASE_T:
2261 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2262 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2263 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2264 case ICE_PHY_TYPE_LOW_1G_SGMII:
2265 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2267 case ICE_PHY_TYPE_LOW_2500BASE_T:
2268 case ICE_PHY_TYPE_LOW_2500BASE_X:
2269 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2270 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2272 case ICE_PHY_TYPE_LOW_5GBASE_T:
2273 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2274 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2276 case ICE_PHY_TYPE_LOW_10GBASE_T:
2277 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2278 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2279 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2280 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2281 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2282 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2283 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2285 case ICE_PHY_TYPE_LOW_25GBASE_T:
2286 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2287 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2288 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2289 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2290 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2291 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2292 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2293 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2294 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2295 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2296 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2298 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2299 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2300 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2301 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2302 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2303 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2304 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2306 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2307 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2308 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2309 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2310 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2311 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2312 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2313 case ICE_PHY_TYPE_LOW_50G_AUI2:
2314 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2315 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2316 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2317 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2318 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2319 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2320 case ICE_PHY_TYPE_LOW_50G_AUI1:
2321 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2323 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2324 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2325 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2326 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2327 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2328 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2329 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2330 case ICE_PHY_TYPE_LOW_100G_AUI4:
2331 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2332 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2333 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2334 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2335 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2336 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2339 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2343 switch (phy_type_high) {
2344 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2345 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2346 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2347 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2348 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2349 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2352 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2356 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2357 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2358 return ICE_AQ_LINK_SPEED_UNKNOWN;
2359 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2360 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2361 return ICE_AQ_LINK_SPEED_UNKNOWN;
2362 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2363 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2364 return speed_phy_type_low;
2366 return speed_phy_type_high;
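/* Example (illustrative): calling this helper with
 * phy_type_low = ICE_PHY_TYPE_LOW_25GBASE_SR and phy_type_high = 0 returns
 * ICE_AQ_LINK_SPEED_25GB. With more than one bit set in phy_type_low the
 * switch falls through to its default case and ICE_AQ_LINK_SPEED_UNKNOWN is
 * returned instead.
 */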
2370 * ice_update_phy_type
2371 * @phy_type_low: pointer to the lower part of phy_type
2372 * @phy_type_high: pointer to the higher part of phy_type
2373 * @link_speeds_bitmap: targeted link speeds bitmap
2375 * Note: For the link_speeds_bitmap structure, you can check it at
2376 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2377 * link_speeds_bitmap that includes multiple speeds.
2379 * Each entry in the [phy_type_low, phy_type_high] structure will
2380 * represent a certain link speed. This helper function will turn on bits
2381 * in the [phy_type_low, phy_type_high] structure based on the value of
2382 * the link_speeds_bitmap input parameter.
2385 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2386 u16 link_speeds_bitmap)
2388 u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
2393 /* We first check with low part of phy_type */
2394 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2395 pt_low = BIT_ULL(index);
2396 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2398 if (link_speeds_bitmap & speed)
2399 *phy_type_low |= BIT_ULL(index);
2402 /* We then check with high part of phy_type */
2403 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2404 pt_high = BIT_ULL(index);
2405 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2407 if (link_speeds_bitmap & speed)
2408 *phy_type_high |= BIT_ULL(index);
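/* Usage sketch (illustrative; a 25G-only target speed is assumed):
 *
 *    u64 phy_low = 0, phy_high = 0;
 *
 *    ice_update_phy_type(&phy_low, &phy_high, ICE_AQ_LINK_SPEED_25GB);
 *
 * Afterwards phy_low has every 25G PHY type bit set, phy_high is unchanged,
 * and both values can be copied into an ice_aqc_set_phy_cfg_data request.
 */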
2413 * ice_aq_set_phy_cfg
2414 * @hw: pointer to the HW struct
2415 * @lport: logical port number
2416 * @cfg: structure with PHY configuration data to be set
2417 * @cd: pointer to command details structure or NULL
2419 * Set the various PHY configuration parameters supported on the Port.
2420 * One or more of the Set PHY config parameters may be ignored in an MFP
2421 * mode as the PF may not have the privilege to set some of the PHY Config
2422 * parameters. This status will be indicated by the command response (0x0601).
2425 ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
2426 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2428 struct ice_aq_desc desc;
2431 return ICE_ERR_PARAM;
2433 /* Ensure that only valid bits of cfg->caps can be turned on. */
2434 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2435 ice_debug(hw, ICE_DBG_PHY,
2436 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2439 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2442 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2443 desc.params.set_phy.lport_num = lport;
2444 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2446 return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
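/* Typical read-modify-write flow (sketch only; error checks omitted and pi is
 * assumed to be a valid port_info):
 *
 *    struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *    struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *    ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, &pcaps, NULL);
 *    ice_copy_phy_caps_to_cfg(&pcaps, &cfg);
 *    cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 *    ice_aq_set_phy_cfg(pi->hw, pi->lport, &cfg, NULL);
 */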
2450 * ice_update_link_info - update status of the HW network link
2451 * @pi: port info structure of the interested logical port
2453 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2455 struct ice_aqc_get_phy_caps_data *pcaps;
2456 struct ice_phy_info *phy_info;
2457 enum ice_status status;
2461 return ICE_ERR_PARAM;
2465 pcaps = (struct ice_aqc_get_phy_caps_data *)
2466 ice_malloc(hw, sizeof(*pcaps));
2468 return ICE_ERR_NO_MEMORY;
2470 phy_info = &pi->phy;
2471 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2475 if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2476 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
2481 ice_memcpy(phy_info->link_info.module_type, &pcaps->module_type,
2482 sizeof(phy_info->link_info.module_type),
2483 ICE_NONDMA_TO_NONDMA);
2486 ice_free(hw, pcaps);
2492 * @pi: port information structure
2493 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2494 * @ena_auto_link_update: enable automatic link update
2496 * Set the requested flow control mode.
2499 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2501 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2502 struct ice_aqc_get_phy_caps_data *pcaps;
2503 enum ice_status status;
2504 u8 pause_mask = 0x0;
2508 return ICE_ERR_PARAM;
2510 *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
2512 switch (pi->fc.req_mode) {
2514 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2515 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2517 case ICE_FC_RX_PAUSE:
2518 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2520 case ICE_FC_TX_PAUSE:
2521 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2527 pcaps = (struct ice_aqc_get_phy_caps_data *)
2528 ice_malloc(hw, sizeof(*pcaps));
2530 return ICE_ERR_NO_MEMORY;
2532 /* Get the current PHY config */
2533 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2536 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2540 /* clear the old pause settings */
2541 cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2542 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2543 /* set the new capabilities */
2544 cfg.caps |= pause_mask;
2545 /* If the capabilities have changed, then set the new config */
2546 if (cfg.caps != pcaps->caps) {
2547 int retry_count, retry_max = 10;
2549 /* Auto restart link so settings take effect */
2550 if (ena_auto_link_update)
2551 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2552 /* Copy over all the old settings */
2553 cfg.phy_type_high = pcaps->phy_type_high;
2554 cfg.phy_type_low = pcaps->phy_type_low;
2555 cfg.low_power_ctrl = pcaps->low_power_ctrl;
2556 cfg.eee_cap = pcaps->eee_cap;
2557 cfg.eeer_value = pcaps->eeer_value;
2558 cfg.link_fec_opt = pcaps->link_fec_options;
2560 status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
2562 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2566 /* Update the link info
2567 * It sometimes takes a really long time for the link to
2568 * come back from the atomic reset, so wait a little between retries.
2571 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2572 status = ice_update_link_info(pi);
2574 if (status == ICE_SUCCESS)
2577 ice_msec_delay(100, true);
2581 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2585 ice_free(hw, pcaps);
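/* Usage sketch (illustrative; ICE_FC_FULL is assumed to be the
 * enum ice_fec_mode-style flow control value requesting symmetric pause):
 *
 *    u8 aq_fail = 0;
 *
 *    pi->fc.req_mode = ICE_FC_FULL;
 *    status = ice_set_fc(pi, &aq_fail, true);
 *
 * On failure, aq_fail reports which of the get/set/update admin queue steps
 * did not complete (ICE_SET_FC_AQ_FAIL_GET/_SET/_UPDATE).
 */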
2590 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2591 * @caps: PHY ability structure to copy data from
2592 * @cfg: PHY configuration structure to copy data to
2594 * Helper function to copy AQC PHY get ability data to the PHY set configuration
 * data structure.
2598 ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
2599 struct ice_aqc_set_phy_cfg_data *cfg)
2604 cfg->phy_type_low = caps->phy_type_low;
2605 cfg->phy_type_high = caps->phy_type_high;
2606 cfg->caps = caps->caps;
2607 cfg->low_power_ctrl = caps->low_power_ctrl;
2608 cfg->eee_cap = caps->eee_cap;
2609 cfg->eeer_value = caps->eeer_value;
2610 cfg->link_fec_opt = caps->link_fec_options;
2614 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2615 * @cfg: PHY configuration data to set FEC mode
2616 * @fec: FEC mode to configure
2618 * The caller should copy the ICE_AQC_PHY_EN_AUTO_FEC bit (bit 7) of
2619 * ice_aqc_get_phy_caps_data.caps into cfg.caps (ICE_AQ_PHY_ENA_AUTO_FEC,
2620 * bit 7) and ice_aqc_get_phy_caps_data.link_fec_options into
 * cfg.link_fec_opt before calling.
2623 ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
2627 /* Clear auto FEC and RS bits, and AND BASE-R ability
2628 * bits and OR request bits.
2630 cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
2631 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2632 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2633 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2634 ICE_AQC_PHY_FEC_25G_KR_REQ;
2637 /* Clear auto FEC and BASE-R bits, and AND RS ability
2638 * bits and OR request bits.
2640 cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
2641 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2642 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2643 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2646 /* Clear auto FEC and all FEC option bits. */
2647 cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
2648 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2651 /* AND auto FEC bit, and all caps bits. */
2652 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
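/* FEC configuration sketch (illustrative; ICE_FEC_RS is assumed to be the
 * enum ice_fec_mode value for Reed-Solomon FEC):
 *
 *    struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *    struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *    ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, &pcaps, NULL);
 *    ice_copy_phy_caps_to_cfg(&pcaps, &cfg);
 *    ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
 *    ice_aq_set_phy_cfg(pi->hw, pi->lport, &cfg, NULL);
 */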
2658 * ice_get_link_status - get status of the HW network link
2659 * @pi: port information structure
2660 * @link_up: pointer to bool (true/false = linkup/linkdown)
2662 * Variable link_up is true if the link is up, false if the link is down.
2663 * The variable link_up is invalid if status is non-zero. As a
2664 * result of this call, link status reporting becomes enabled.
2666 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2668 struct ice_phy_info *phy_info;
2669 enum ice_status status = ICE_SUCCESS;
2671 if (!pi || !link_up)
2672 return ICE_ERR_PARAM;
2674 phy_info = &pi->phy;
2676 if (phy_info->get_link_info) {
2677 status = ice_update_link_info(pi);
2680 ice_debug(pi->hw, ICE_DBG_LINK,
2681 "get link status error, status = %d\n",
2685 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
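/* Usage sketch (illustrative; pi is assumed to be a valid port_info):
 *
 *    bool link_up = false;
 *
 *    status = ice_get_link_status(pi, &link_up);
 *
 * link_up is only meaningful when status is ICE_SUCCESS.
 */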
2691 * ice_aq_set_link_restart_an
2692 * @pi: pointer to the port information structure
2693 * @ena_link: if true: enable link, if false: disable link
2694 * @cd: pointer to command details structure or NULL
2696 * Sets up the link and restarts the Auto-Negotiation over the link.
2699 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2700 struct ice_sq_cd *cd)
2702 struct ice_aqc_restart_an *cmd;
2703 struct ice_aq_desc desc;
2705 cmd = &desc.params.restart_an;
2707 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2709 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2710 cmd->lport_num = pi->lport;
2712 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2714 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2716 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2720 * ice_aq_set_event_mask
2721 * @hw: pointer to the HW struct
2722 * @port_num: port number of the physical function
2723 * @mask: event mask to be set
2724 * @cd: pointer to command details structure or NULL
2726 * Set event mask (0x0613)
2729 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2730 struct ice_sq_cd *cd)
2732 struct ice_aqc_set_event_mask *cmd;
2733 struct ice_aq_desc desc;
2735 cmd = &desc.params.set_event_mask;
2737 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2739 cmd->lport_num = port_num;
2741 cmd->event_mask = CPU_TO_LE16(mask);
2742 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2746 * ice_aq_set_mac_loopback
2747 * @hw: pointer to the HW struct
2748 * @ena_lpbk: Enable or Disable loopback
2749 * @cd: pointer to command details structure or NULL
2751 * Enable/disable loopback on a given port
2754 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2756 struct ice_aqc_set_mac_lb *cmd;
2757 struct ice_aq_desc desc;
2759 cmd = &desc.params.set_mac_lb;
2761 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2763 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2765 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2770 * ice_aq_set_port_id_led
2771 * @pi: pointer to the port information
2772 * @is_orig_mode: is this LED set to original mode (by the net-list)
2773 * @cd: pointer to command details structure or NULL
2775 * Set LED value for the given port (0x06e9)
2778 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2779 struct ice_sq_cd *cd)
2781 struct ice_aqc_set_port_id_led *cmd;
2782 struct ice_hw *hw = pi->hw;
2783 struct ice_aq_desc desc;
2785 cmd = &desc.params.set_port_id_led;
2787 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2791 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2793 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2795 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2799 * __ice_aq_get_set_rss_lut
2800 * @hw: pointer to the hardware structure
2801 * @vsi_id: VSI FW index
2802 * @lut_type: LUT table type
2803 * @lut: pointer to the LUT buffer provided by the caller
2804 * @lut_size: size of the LUT buffer
2805 * @glob_lut_idx: global LUT index
2806 * @set: set true to set the table, false to get the table
2808 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
2810 static enum ice_status
2811 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
2812 u16 lut_size, u8 glob_lut_idx, bool set)
2814 struct ice_aqc_get_set_rss_lut *cmd_resp;
2815 struct ice_aq_desc desc;
2816 enum ice_status status;
2819 cmd_resp = &desc.params.get_set_rss_lut;
2822 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
2823 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2825 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
2828 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
2829 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
2830 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
2831 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
2834 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
2835 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
2836 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
2837 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
2838 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
2841 status = ICE_ERR_PARAM;
2842 goto ice_aq_get_set_rss_lut_exit;
2845 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
2846 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
2847 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
2850 goto ice_aq_get_set_rss_lut_send;
2851 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2853 goto ice_aq_get_set_rss_lut_send;
2855 goto ice_aq_get_set_rss_lut_send;
2858 /* LUT size is only valid for Global and PF table types */
2860 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
2861 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
2862 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2863 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2865 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
2866 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
2867 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2868 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2870 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
2871 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
2872 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
2873 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
2874 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
2879 status = ICE_ERR_PARAM;
2880 goto ice_aq_get_set_rss_lut_exit;
2883 ice_aq_get_set_rss_lut_send:
2884 cmd_resp->flags = CPU_TO_LE16(flags);
2885 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
2887 ice_aq_get_set_rss_lut_exit:
2892 * ice_aq_get_rss_lut
2893 * @hw: pointer to the hardware structure
2894 * @vsi_handle: software VSI handle
2895 * @lut_type: LUT table type
2896 * @lut: pointer to the LUT buffer provided by the caller
2897 * @lut_size: size of the LUT buffer
2899 * get the RSS lookup table, PF or VSI type
2902 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2903 u8 *lut, u16 lut_size)
2905 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2906 return ICE_ERR_PARAM;
2908 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2909 lut_type, lut, lut_size, 0, false);
2913 * ice_aq_set_rss_lut
2914 * @hw: pointer to the hardware structure
2915 * @vsi_handle: software VSI handle
2916 * @lut_type: LUT table type
2917 * @lut: pointer to the LUT buffer provided by the caller
2918 * @lut_size: size of the LUT buffer
2920 * set the RSS lookup table, PF or VSI type
2923 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2924 u8 *lut, u16 lut_size)
2926 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2927 return ICE_ERR_PARAM;
2929 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2930 lut_type, lut, lut_size, 0, true);
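/* Usage sketch (illustrative; a 512-entry PF table is assumed and lut[] is
 * assumed to already hold the desired queue indexes):
 *
 *    u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *
 *    status = ice_aq_set_rss_lut(hw, vsi_handle,
 *                                ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *                                lut, sizeof(lut));
 */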
2934 * __ice_aq_get_set_rss_key
2935 * @hw: pointer to the HW struct
2936 * @vsi_id: VSI FW index
2937 * @key: pointer to key info struct
2938 * @set: set true to set the key, false to get the key
2940 * get (0x0B04) or set (0x0B02) the RSS key per VSI
2943 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2944 struct ice_aqc_get_set_rss_keys *key,
2947 struct ice_aqc_get_set_rss_key *cmd_resp;
2948 u16 key_size = sizeof(*key);
2949 struct ice_aq_desc desc;
2951 cmd_resp = &desc.params.get_set_rss_key;
2954 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2955 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2957 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2960 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
2961 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2962 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2963 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2965 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2969 * ice_aq_get_rss_key
2970 * @hw: pointer to the HW struct
2971 * @vsi_handle: software VSI handle
2972 * @key: pointer to key info struct
2974 * get the RSS key per VSI
2977 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2978 struct ice_aqc_get_set_rss_keys *key)
2980 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2981 return ICE_ERR_PARAM;
2983 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2988 * ice_aq_set_rss_key
2989 * @hw: pointer to the HW struct
2990 * @vsi_handle: software VSI handle
2991 * @keys: pointer to key info struct
2993 * set the RSS key per VSI
2996 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2997 struct ice_aqc_get_set_rss_keys *keys)
2999 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3000 return ICE_ERR_PARAM;
3002 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
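/* Usage sketch (illustrative; "seed" and the standard_rss_key field name are
 * assumptions about the caller's key material and the key structure layout):
 *
 *    struct ice_aqc_get_set_rss_keys keys = { 0 };
 *
 *    ice_memcpy(keys.standard_rss_key, seed, sizeof(keys.standard_rss_key),
 *               ICE_NONDMA_TO_NONDMA);
 *    status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */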
3007 * ice_aq_add_lan_txq
3008 * @hw: pointer to the hardware structure
3009 * @num_qgrps: Number of added queue groups
3010 * @qg_list: list of queue groups to be added
3011 * @buf_size: size of buffer for indirect command
3012 * @cd: pointer to command details structure or NULL
3014 * Add Tx LAN queue (0x0C30)
3017 * Prior to calling add Tx LAN queue:
3018 * Initialize the following as part of the Tx queue context:
3019 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3020 * Cache profile and Packet shaper profile.
3022 * After add Tx LAN queue AQ command is completed:
3023 * Interrupts should be associated with specific queues,
3024 * Association of Tx queue to Doorbell queue is not part of the Add LAN Tx
 * queue flow.
3028 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3029 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3030 struct ice_sq_cd *cd)
3032 u16 i, sum_header_size, sum_q_size = 0;
3033 struct ice_aqc_add_tx_qgrp *list;
3034 struct ice_aqc_add_txqs *cmd;
3035 struct ice_aq_desc desc;
3037 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_lan_txq");
3039 cmd = &desc.params.add_txqs;
3041 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3044 return ICE_ERR_PARAM;
3046 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3047 return ICE_ERR_PARAM;
3049 sum_header_size = num_qgrps *
3050 (sizeof(*qg_list) - sizeof(*qg_list->txqs));
3053 for (i = 0; i < num_qgrps; i++) {
3054 struct ice_aqc_add_txqs_perq *q = list->txqs;
3056 sum_q_size += list->num_txqs * sizeof(*q);
3057 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
3060 if (buf_size != (sum_header_size + sum_q_size))
3061 return ICE_ERR_PARAM;
3063 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3065 cmd->num_qgrps = num_qgrps;
3067 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
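/* Buffer sizing sketch (illustrative): for num_qgrps == 1 with
 * qg_list->num_txqs == 1 the expected size reduces to
 *
 *    buf_size = (sizeof(*qg_list) - sizeof(*qg_list->txqs))
 *               + 1 * sizeof(*qg_list->txqs)
 *             = sizeof(struct ice_aqc_add_tx_qgrp);
 *
 * each extra queue in the group adds sizeof(struct ice_aqc_add_txqs_perq).
 */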
3071 * ice_aq_dis_lan_txq
3072 * @hw: pointer to the hardware structure
3073 * @num_qgrps: number of groups in the list
3074 * @qg_list: the list of groups to disable
3075 * @buf_size: the total size of the qg_list buffer in bytes
3076 * @rst_src: if called due to reset, specifies the reset source
3077 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3078 * @cd: pointer to command details structure or NULL
3080 * Disable LAN Tx queue (0x0C31)
3082 static enum ice_status
3083 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3084 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3085 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3086 struct ice_sq_cd *cd)
3088 struct ice_aqc_dis_txqs *cmd;
3089 struct ice_aq_desc desc;
3090 enum ice_status status;
3093 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_dis_lan_txq");
3094 cmd = &desc.params.dis_txqs;
3095 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3097 /* qg_list can be NULL only in VM/VF reset flow */
3098 if (!qg_list && !rst_src)
3099 return ICE_ERR_PARAM;
3101 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3102 return ICE_ERR_PARAM;
3104 cmd->num_entries = num_qgrps;
3106 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3107 ICE_AQC_Q_DIS_TIMEOUT_M);
3111 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3112 cmd->vmvf_and_timeout |=
3113 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3120 /* flush pipe on timeout */
3121 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3122 /* If no queue group info, we are in a reset flow. Issue the AQ */
3126 /* set RD bit to indicate that command buffer is provided by the driver
3127 * and it needs to be read by the firmware
3129 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3131 for (i = 0; i < num_qgrps; ++i) {
3132 /* Calculate the size taken up by the queue IDs in this group */
3133 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
3135 /* Add the size of the group header */
3136 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
3138 /* If the num of queues is even, add 2 bytes of padding */
3139 if ((qg_list[i].num_qs % 2) == 0)
3144 return ICE_ERR_PARAM;
3147 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3150 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3151 vmvf_num, hw->adminq.sq_last_status);
3153 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3154 LE16_TO_CPU(qg_list[0].q_id[0]),
3155 hw->adminq.sq_last_status);
3161 /* End of FW Admin Queue command wrappers */
3164 * ice_write_byte - write a byte to a packed context structure
3165 * @src_ctx: the context structure to read from
3166 * @dest_ctx: the context to be written to
3167 * @ce_info: a description of the struct to be filled
3170 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3172 u8 src_byte, dest_byte, mask;
3176 /* copy from the next struct field */
3177 from = src_ctx + ce_info->offset;
3179 /* prepare the bits and mask */
3180 shift_width = ce_info->lsb % 8;
3181 mask = (u8)(BIT(ce_info->width) - 1);
3186 /* shift to correct alignment */
3187 mask <<= shift_width;
3188 src_byte <<= shift_width;
3190 /* get the current bits from the target bit string */
3191 dest = dest_ctx + (ce_info->lsb / 8);
3193 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3195 dest_byte &= ~mask; /* get the bits not changing */
3196 dest_byte |= src_byte; /* add in the new bits */
3198 /* put it all back */
3199 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
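/* Worked example (illustrative): for a field described by lsb = 11 and
 * width = 3, shift_width = 11 % 8 = 3 and mask = 0x07 << 3 = 0x38. A source
 * value of 0x5 becomes 0x28 after the shift, which is ORed into byte
 * lsb / 8 = 1 of the packed context once the old bits under 0x38 are cleared.
 */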
3203 * ice_write_word - write a word to a packed context structure
3204 * @src_ctx: the context structure to read from
3205 * @dest_ctx: the context to be written to
3206 * @ce_info: a description of the struct to be filled
3209 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3216 /* copy from the next struct field */
3217 from = src_ctx + ce_info->offset;
3219 /* prepare the bits and mask */
3220 shift_width = ce_info->lsb % 8;
3221 mask = BIT(ce_info->width) - 1;
3223 /* don't swizzle the bits until after the mask because the mask bits
3224 * will be in a different bit position on big endian machines
3226 src_word = *(u16 *)from;
3229 /* shift to correct alignment */
3230 mask <<= shift_width;
3231 src_word <<= shift_width;
3233 /* get the current bits from the target bit string */
3234 dest = dest_ctx + (ce_info->lsb / 8);
3236 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3238 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
3239 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
3241 /* put it all back */
3242 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3246 * ice_write_dword - write a dword to a packed context structure
3247 * @src_ctx: the context structure to read from
3248 * @dest_ctx: the context to be written to
3249 * @ce_info: a description of the struct to be filled
3252 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3254 u32 src_dword, mask;
3259 /* copy from the next struct field */
3260 from = src_ctx + ce_info->offset;
3262 /* prepare the bits and mask */
3263 shift_width = ce_info->lsb % 8;
3265 /* if the field width is exactly 32 on an x86 machine, then the shift
3266 * operation will not work because the SHL instruction's count is masked
3267 * to 5 bits so the shift will do nothing
3269 if (ce_info->width < 32)
3270 mask = BIT(ce_info->width) - 1;
3274 /* don't swizzle the bits until after the mask because the mask bits
3275 * will be in a different bit position on big endian machines
3277 src_dword = *(u32 *)from;
3280 /* shift to correct alignment */
3281 mask <<= shift_width;
3282 src_dword <<= shift_width;
3284 /* get the current bits from the target bit string */
3285 dest = dest_ctx + (ce_info->lsb / 8);
3287 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3289 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
3290 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
3292 /* put it all back */
3293 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3297 * ice_write_qword - write a qword to a packed context structure
3298 * @src_ctx: the context structure to read from
3299 * @dest_ctx: the context to be written to
3300 * @ce_info: a description of the struct to be filled
3303 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3305 u64 src_qword, mask;
3310 /* copy from the next struct field */
3311 from = src_ctx + ce_info->offset;
3313 /* prepare the bits and mask */
3314 shift_width = ce_info->lsb % 8;
3316 /* if the field width is exactly 64 on an x86 machine, then the shift
3317 * operation will not work because the SHL instruction's count is masked
3318 * to 6 bits so the shift will do nothing
3320 if (ce_info->width < 64)
3321 mask = BIT_ULL(ce_info->width) - 1;
3325 /* don't swizzle the bits until after the mask because the mask bits
3326 * will be in a different bit position on big endian machines
3328 src_qword = *(u64 *)from;
3331 /* shift to correct alignment */
3332 mask <<= shift_width;
3333 src_qword <<= shift_width;
3335 /* get the current bits from the target bit string */
3336 dest = dest_ctx + (ce_info->lsb / 8);
3338 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
3340 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
3341 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
3343 /* put it all back */
3344 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3348 * ice_set_ctx - set context bits in packed structure
3349 * @src_ctx: pointer to a generic non-packed context structure
3350 * @dest_ctx: pointer to memory for the packed structure
3351 * @ce_info: a description of the structure to be transformed
3354 ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3358 for (f = 0; ce_info[f].width; f++) {
3359 /* We have to deal with each element of the FW response
3360 * using the correct size so that we are correct regardless
3361 * of the endianness of the machine.
3363 switch (ce_info[f].size_of) {
3365 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3368 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3371 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3374 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3377 return ICE_ERR_INVAL_SIZE;
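/* Note on ce_info tables (sketch): both ice_set_ctx and ice_get_ctx walk the
 * description array until they hit an entry whose width is 0, so such a table
 * is expected to end with an all-zero terminator entry, e.g.
 *
 *    static struct ice_ctx_ele my_ctx_info[] = {
 *        one entry per field, giving its offset, size_of, width and lsb,
 *        { 0 },
 *    };
 *
 * my_ctx_info is a hypothetical table name used only for illustration.
 */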
3388 * ice_read_byte - read context byte into struct
3389 * @src_ctx: the context structure to read from
3390 * @dest_ctx: the context to be written to
3391 * @ce_info: a description of the struct to be filled
3394 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3400 /* prepare the bits and mask */
3401 shift_width = ce_info->lsb % 8;
3402 mask = (u8)(BIT(ce_info->width) - 1);
3404 /* shift to correct alignment */
3405 mask <<= shift_width;
3407 /* get the current bits from the src bit string */
3408 src = src_ctx + (ce_info->lsb / 8);
3410 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3412 dest_byte &= ~(mask);
3414 dest_byte >>= shift_width;
3416 /* get the address from the struct field */
3417 target = dest_ctx + ce_info->offset;
3419 /* put it back in the struct */
3420 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3424 * ice_read_word - read context word into struct
3425 * @src_ctx: the context structure to read from
3426 * @dest_ctx: the context to be written to
3427 * @ce_info: a description of the struct to be filled
3430 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3432 u16 dest_word, mask;
3437 /* prepare the bits and mask */
3438 shift_width = ce_info->lsb % 8;
3439 mask = BIT(ce_info->width) - 1;
3441 /* shift to correct alignment */
3442 mask <<= shift_width;
3444 /* get the current bits from the src bit string */
3445 src = src_ctx + (ce_info->lsb / 8);
3447 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
3449 /* the data in the memory is stored as little endian so mask it
 * correctly
 */
3452 src_word &= ~(CPU_TO_LE16(mask));
3454 /* get the data back into host order before shifting */
3455 dest_word = LE16_TO_CPU(src_word);
3457 dest_word >>= shift_width;
3459 /* get the address from the struct field */
3460 target = dest_ctx + ce_info->offset;
3462 /* put it back in the struct */
3463 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3467 * ice_read_dword - read context dword into struct
3468 * @src_ctx: the context structure to read from
3469 * @dest_ctx: the context to be written to
3470 * @ce_info: a description of the struct to be filled
3473 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3475 u32 dest_dword, mask;
3480 /* prepare the bits and mask */
3481 shift_width = ce_info->lsb % 8;
3483 /* if the field width is exactly 32 on an x86 machine, then the shift
3484 * operation will not work because the SHL instruction's count is masked
3485 * to 5 bits so the shift will do nothing
3487 if (ce_info->width < 32)
3488 mask = BIT(ce_info->width) - 1;
3492 /* shift to correct alignment */
3493 mask <<= shift_width;
3495 /* get the current bits from the src bit string */
3496 src = src_ctx + (ce_info->lsb / 8);
3498 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
3500 /* the data in the memory is stored as little endian so mask it
 * correctly
 */
3503 src_dword &= ~(CPU_TO_LE32(mask));
3505 /* get the data back into host order before shifting */
3506 dest_dword = LE32_TO_CPU(src_dword);
3508 dest_dword >>= shift_width;
3510 /* get the address from the struct field */
3511 target = dest_ctx + ce_info->offset;
3513 /* put it back in the struct */
3514 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3518 * ice_read_qword - read context qword into struct
3519 * @src_ctx: the context structure to read from
3520 * @dest_ctx: the context to be written to
3521 * @ce_info: a description of the struct to be filled
3524 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3526 u64 dest_qword, mask;
3531 /* prepare the bits and mask */
3532 shift_width = ce_info->lsb % 8;
3534 /* if the field width is exactly 64 on an x86 machine, then the shift
3535 * operation will not work because the SHL instruction's count is masked
3536 * to 6 bits so the shift will do nothing
3538 if (ce_info->width < 64)
3539 mask = BIT_ULL(ce_info->width) - 1;
3543 /* shift to correct alignment */
3544 mask <<= shift_width;
3546 /* get the current bits from the src bit string */
3547 src = src_ctx + (ce_info->lsb / 8);
3549 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
3551 /* the data in the memory is stored as little endian so mask it
 * correctly
 */
3554 src_qword &= ~(CPU_TO_LE64(mask));
3556 /* get the data back into host order before shifting */
3557 dest_qword = LE64_TO_CPU(src_qword);
3559 dest_qword >>= shift_width;
3561 /* get the address from the struct field */
3562 target = dest_ctx + ce_info->offset;
3564 /* put it back in the struct */
3565 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3569 * ice_get_ctx - extract context bits from a packed structure
3570 * @src_ctx: pointer to a generic packed context structure
3571 * @dest_ctx: pointer to a generic non-packed context structure
3572 * @ce_info: a description of the structure to be read from
3575 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3579 for (f = 0; ce_info[f].width; f++) {
3580 switch (ce_info[f].size_of) {
3582 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
3585 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
3588 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
3591 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
3594 /* nothing to do, just keep going */
3603 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3604 * @hw: pointer to the HW struct
3605 * @vsi_handle: software VSI handle
3607 * @q_handle: software queue handle
3609 static struct ice_q_ctx *
3610 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3612 struct ice_vsi_ctx *vsi;
3613 struct ice_q_ctx *q_ctx;
3615 vsi = ice_get_vsi_ctx(hw, vsi_handle);
3618 if (q_handle >= vsi->num_lan_q_entries[tc])
3620 if (!vsi->lan_q_ctx[tc])
3622 q_ctx = vsi->lan_q_ctx[tc];
3623 return &q_ctx[q_handle];
3628 * @pi: port information structure
3629 * @vsi_handle: software VSI handle
3631 * @q_handle: software queue handle
3632 * @num_qgrps: Number of added queue groups
3633 * @buf: list of queue groups to be added
3634 * @buf_size: size of buffer for indirect command
3635 * @cd: pointer to command details structure or NULL
3637 * This function adds one LAN queue
3640 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3641 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
3642 struct ice_sq_cd *cd)
3644 struct ice_aqc_txsched_elem_data node = { 0 };
3645 struct ice_sched_node *parent;
3646 struct ice_q_ctx *q_ctx;
3647 enum ice_status status;
3650 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3653 if (num_qgrps > 1 || buf->num_txqs > 1)
3654 return ICE_ERR_MAX_LIMIT;
3658 if (!ice_is_vsi_valid(hw, vsi_handle))
3659 return ICE_ERR_PARAM;
3661 ice_acquire_lock(&pi->sched_lock);
3663 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
3665 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
3667 status = ICE_ERR_PARAM;
3671 /* find a parent node */
3672 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
3673 ICE_SCHED_NODE_OWNER_LAN);
3675 status = ICE_ERR_PARAM;
3679 buf->parent_teid = parent->info.node_teid;
3680 node.parent_teid = parent->info.node_teid;
3681 /* Mark the values in the "generic" section as valid. The default
3682 * value in the "generic" section is zero. This means that:
3683 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
3684 * - 0 priority among siblings, indicated by Bits 1-3.
3685 * - WFQ, indicated by Bit 4.
3686 * - 0 Adjustment value is used in PSM credit update flow, indicated by
 *   Bits 5-6.
3688 * - Bit 7 is reserved.
3689 * Without setting the generic section as valid in valid_sections, the
3690 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
3692 buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
3694 /* add the LAN queue */
3695 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
3696 if (status != ICE_SUCCESS) {
3697 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
3698 LE16_TO_CPU(buf->txqs[0].txq_id),
3699 hw->adminq.sq_last_status);
3703 node.node_teid = buf->txqs[0].q_teid;
3704 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
3705 q_ctx->q_handle = q_handle;
3707 /* add a leaf node into the scheduler tree queue layer */
3708 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
3711 ice_release_lock(&pi->sched_lock);
3717 * @pi: port information structure
3718 * @vsi_handle: software VSI handle
3720 * @num_queues: number of queues
3721 * @q_handles: pointer to software queue handle array
3722 * @q_ids: pointer to the q_id array
3723 * @q_teids: pointer to queue node teids
3724 * @rst_src: if called due to reset, specifies the reset source
3725 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3726 * @cd: pointer to command details structure or NULL
3728 * This function removes queues and their corresponding nodes in SW DB
3731 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
3732 u16 *q_handles, u16 *q_ids, u32 *q_teids,
3733 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3734 struct ice_sq_cd *cd)
3736 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
3737 struct ice_aqc_dis_txq_item qg_list;
3738 struct ice_q_ctx *q_ctx;
3741 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3746 /* if the queue is already disabled but the disable queue command
3747 * still has to be sent to complete the VF reset, then call
3748 * ice_aq_dis_lan_txq without any queue information
3751 return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
3756 ice_acquire_lock(&pi->sched_lock);
3758 for (i = 0; i < num_queues; i++) {
3759 struct ice_sched_node *node;
3761 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
3764 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
3766 ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
3770 if (q_ctx->q_handle != q_handles[i]) {
3771 ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
3772 q_ctx->q_handle, q_handles[i]);
3775 qg_list.parent_teid = node->info.parent_teid;
3777 qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
3778 status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
3779 sizeof(qg_list), rst_src, vmvf_num,
3782 if (status != ICE_SUCCESS)
3784 ice_free_sched_node(pi, node);
3785 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
3787 ice_release_lock(&pi->sched_lock);
3792 * ice_cfg_vsi_qs - configure the new/existing VSI queues
3793 * @pi: port information structure
3794 * @vsi_handle: software VSI handle
3795 * @tc_bitmap: TC bitmap
3796 * @maxqs: max queues array per TC
3797 * @owner: LAN or RDMA
3799 * This function adds/updates the VSI queues per TC.
3801 static enum ice_status
3802 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3803 u16 *maxqs, u8 owner)
3805 enum ice_status status = ICE_SUCCESS;
3808 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3811 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3812 return ICE_ERR_PARAM;
3814 ice_acquire_lock(&pi->sched_lock);
3816 ice_for_each_traffic_class(i) {
3817 /* configuration is possible only if TC node is present */
3818 if (!ice_sched_get_tc_node(pi, i))
3821 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
3822 ice_is_tc_ena(tc_bitmap, i));
3827 ice_release_lock(&pi->sched_lock);
3832 * ice_cfg_vsi_lan - configure VSI LAN queues
3833 * @pi: port information structure
3834 * @vsi_handle: software VSI handle
3835 * @tc_bitmap: TC bitmap
3836 * @max_lanqs: max LAN queues array per TC
3838 * This function adds/updates the VSI LAN queues per TC.
3841 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3844 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
3845 ICE_SCHED_NODE_OWNER_LAN);
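/* Usage sketch (illustrative; ICE_MAX_TRAFFIC_CLASS is assumed to size the
 * per-TC array and only TC 0 is enabled here):
 *
 *    u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *
 *    max_lanqs[0] = num_wanted_lan_queues;
 *    status = ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
 *
 * num_wanted_lan_queues is a placeholder for the caller's queue count.
 */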
3851 * ice_replay_pre_init - replay pre initialization
3852 * @hw: pointer to the HW struct
3854 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
3856 static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
3858 struct ice_switch_info *sw = hw->switch_info;
3861 /* Delete old entries from replay filter list head if there are any */
3862 ice_rm_all_sw_replay_rule_info(hw);
3863 /* At the start of replay, move entries into the replay_rules list; this
3864 * allows rule entries to be added back to the filt_rules list,
3865 * which is the operational list.
3867 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
3868 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
3869 &sw->recp_list[i].filt_replay_rules);
3870 ice_sched_replay_agg_vsi_preinit(hw);
3872 return ice_sched_replay_tc_node_bw(hw);
3876 * ice_replay_vsi - replay VSI configuration
3877 * @hw: pointer to the HW struct
3878 * @vsi_handle: driver VSI handle
3880 * Restore all VSI configuration after reset. It is required to call this
3881 * function with the main VSI first.
3883 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
3885 enum ice_status status;
3887 if (!ice_is_vsi_valid(hw, vsi_handle))
3888 return ICE_ERR_PARAM;
3890 /* Replay pre-initialization if there is any */
3891 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
3892 status = ice_replay_pre_init(hw);
3896 /* Replay per VSI all RSS configurations */
3897 status = ice_replay_rss_cfg(hw, vsi_handle);
3900 /* Replay per VSI all filters */
3901 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
3903 status = ice_replay_vsi_agg(hw, vsi_handle);
3908 * ice_replay_post - post replay configuration cleanup
3909 * @hw: pointer to the HW struct
3911 * Post replay cleanup.
3913 void ice_replay_post(struct ice_hw *hw)
3915 /* Delete old entries from replay filter list head */
3916 ice_rm_all_sw_replay_rule_info(hw);
3917 ice_sched_replay_agg(hw);
3921 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
3922 * @hw: ptr to the hardware info
3923 * @hireg: high 32 bit HW register to read from
3924 * @loreg: low 32 bit HW register to read from
3925 * @prev_stat_loaded: bool to specify if previous stats are loaded
3926 * @prev_stat: ptr to previous loaded stat value
3927 * @cur_stat: ptr to current stat value
3930 ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
3931 bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
3935 new_data = rd32(hw, loreg);
3936 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3938 /* device stats are not reset at PFR; they likely will not be zeroed
3939 * when the driver starts. So save the first values read and use them as
3940 * offsets to be subtracted from the raw values in order to report stats
3941 * that count from zero.
3943 if (!prev_stat_loaded)
3944 *prev_stat = new_data;
3945 if (new_data >= *prev_stat)
3946 *cur_stat = new_data - *prev_stat;
3948 /* to manage the potential roll-over */
3949 *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
3950 *cur_stat &= 0xFFFFFFFFFFULL;
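/* Rollover sketch (illustrative numbers): with *prev_stat = 0xFFFFFFFFF0
 * and a new 40-bit reading of 0x10 the counter has wrapped, so
 *
 *    *cur_stat = (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20
 *
 * and the final mask keeps the result within 40 bits.
 */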
3954 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
3955 * @hw: ptr to the hardware info
3956 * @reg: HW register to read from
3957 * @prev_stat_loaded: bool to specify if previous stats are loaded
3958 * @prev_stat: ptr to previous loaded stat value
3959 * @cur_stat: ptr to current stat value
3962 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3963 u64 *prev_stat, u64 *cur_stat)
3967 new_data = rd32(hw, reg);
3969 /* device stats are not reset at PFR; they likely will not be zeroed
3970 * when the driver starts. So save the first values read and use them as
3971 * offsets to be subtracted from the raw values in order to report stats
3972 * that count from zero.
3974 if (!prev_stat_loaded)
3975 *prev_stat = new_data;
3976 if (new_data >= *prev_stat)
3977 *cur_stat = new_data - *prev_stat;
3979 /* to manage the potential roll-over */
3980 *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
3985 * ice_sched_query_elem - query element information from HW
3986 * @hw: pointer to the HW struct
3987 * @node_teid: node TEID to be queried
3988 * @buf: buffer to element information
3990 * This function queries HW element information
3993 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
3994 struct ice_aqc_get_elem *buf)
3996 u16 buf_size, num_elem_ret = 0;
3997 enum ice_status status;
3999 buf_size = sizeof(*buf);
4000 ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
4001 buf->generic[0].node_teid = CPU_TO_LE32(node_teid);
4002 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4004 if (status != ICE_SUCCESS || num_elem_ret != 1)
4005 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4010 * ice_is_fw_in_rec_mode
4011 * @hw: pointer to the HW struct
4013 * This function returns true if FW is in recovery mode.
4015 bool ice_is_fw_in_rec_mode(struct ice_hw *hw)
4019 /* check the current FW mode */
4020 reg = rd32(hw, GL_MNG_FWSM);
4021 return (reg & GL_MNG_FWSM_FW_MODES_M) > ICE_FW_MODE_DBG;