/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2018
 */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	200
#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
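
/* Illustrative expansion (not part of the original source): for profile 2,
 * ICE_PROG_FLEX_ENTRY(hw, 2, ICE_RX_MDID_HASH_LOW, 0) becomes a single
 * register write:
 *
 *	wr32(hw, GLFLXP_RXDID_FLX_WRD_0(2),
 *	     ((ICE_RX_OPC_MDID << GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M) |
 *	     ((ICE_RX_MDID_HASH_LOW << GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) &
 *	      GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M));
 *
 * i.e. each invocation programs one flex descriptor word with an opcode and
 * a metadata ID.
 */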

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	enum ice_status status = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "ice_set_mac_type\n");

	if (hw->vendor_id == ICE_INTEL_VENDOR_ID) {
		switch (hw->device_id) {
		default:
			hw->mac_type = ICE_MAC_GENERIC;
			break;
		}
	} else {
		status = ICE_ERR_DEVICE_NOT_SUPPORTED;
	}

	ice_debug(hw, ICE_DBG_INIT, "found mac_type: %d, status: %d\n",
		  hw->mac_type, status);

	return status;
}

#if defined(FPGA_SUPPORT) || defined(CVL_A0_SUPPORT)
void ice_dev_onetime_setup(struct ice_hw *hw)
{
	/* configure Rx - set non pxe mode */
	wr32(hw, GLLAN_RCTL_0, 0x1);
}
#endif /* FPGA_SUPPORT || CVL_A0_SUPPORT */

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer. Please interpret the user
 * specified buffer as a "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_aq_discover_caps is expected to be called before this
 * function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_DMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_DMA_TO_NONDMA);
			break;
		}

	return ICE_SUCCESS;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
	}

	return status;
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);
	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = LE16_TO_CPU(link_data.link_speed);
	hw_link_info->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	hw_link_info->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	hw_link_info->topo_media_conflict = link_data.topo_media_conflict;
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	hw_link_info->lse_ena =
		!!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}

/**
 * ice_init_flex_flags
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize Rx flex flags
 */
static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	u8 idx = 0;

	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
	 * flexiflags1[3:0] - Not used for flag programming
	 * flexiflags2[7:0] - Tunnel and VLAN types
	 * 2 invalid fields in last index
	 */
	switch (prof_id) {
	/* Rx flex flags are currently programmed for the NIC profiles only.
	 * Different flag bit programming configurations can be added per
	 * profile as needed.
	 */
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_FIN, idx++);
		/* flex flag 1 is not used for flexi-flag programming, skipping
		 * these four FLG64 bits.
		 */
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
				   ICE_RXFLG_EVLAN_x9100, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
				   ICE_RXFLG_TNL0, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Flag programming for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_flex_flds
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize flex descriptors
 */
static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	enum ice_flex_rx_mdid mdid;

	switch (prof_id) {
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);

		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;

		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);

		ice_init_flex_flags(hw, prof_id);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Field init for profile ID %d not supported\n",
			  prof_id);
	}
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the hw struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = (struct ice_switch_info *)
		ice_malloc(hw, sizeof(*hw->switch_info));
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	return ice_init_def_sw_recp(hw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the hw struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		recps[i].root_rid = i;

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}

#define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))

#define ICE_FW_LOG_DESC_SIZE_MAX	\
	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
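
/* Worked example (illustrative, not from the original source): this is the
 * classic flexible-array sizing idiom. The struct already contains one
 * element of its trailing "entry" member, so only (n - 1) extra entries are
 * added on top of sizeof(struct): ICE_FW_LOG_DESC_SIZE(1) is just the struct
 * itself, and every additional module entry adds one sizeof(entry).
 */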

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the hw struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * the device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging_data *data = NULL;
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = ICE_SUCCESS;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return ICE_SUCCESS;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return ICE_SUCCESS;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = (struct ice_aqc_fw_logging_data *)
					ice_malloc(hw,
						   ICE_FW_LOG_DESC_SIZE_MAX);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data->entry[chgs++] = CPU_TO_LE16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = ICE_FW_LOG_DESC_SIZE(chgs);
			desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = LE16_TO_CPU(data->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		ice_free(hw, data);

	return status;
}

/**
 * ice_output_fw_log
 * @hw: pointer to the hw struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
			LE16_TO_CPU(desc->datalen));
	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran - determine int/intrl granularity
 * @hw: pointer to the hw struct
 *
 * Determines the itr/intrl granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to determine itr/intrl granularity\n");
		return ICE_ERR_CFG;
	}

	return ICE_SUCCESS;
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "ice_init_hw");

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	status = ice_get_itr_intrl_gran(hw);
	if (status)
		return status;

	status = ice_init_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
			ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to hw */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

#if defined(FPGA_SUPPORT) || defined(CVL_A0_SUPPORT)
	/* some of the register write workarounds to get Rx working */
	ice_dev_onetime_setup(hw);
#endif /* FPGA_SUPPORT || CVL_A0_SUPPORT */

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);

	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_shutdown_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_shutdown_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
#define GLGEN_RSTCTL		0x000B8180 /* Reset Source: POR */
#define GLGEN_RSTCTL_GRSTDEL_S	0
#define GLGEN_RSTCTL_GRSTDEL_M	MAKEMASK(0x3F, GLGEN_RSTCTL_GRSTDEL_S)
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;
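
	/* Illustrative arithmetic (not from the original source): with a
	 * GRSTDEL field of 30, the loop below polls up to 30 + 10 = 40 times
	 * at 100ms each, i.e. the 3s delay reported by hardware plus 1s of
	 * headroom for outstanding AQ commands.
	 */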

	for (cnt = 0; cnt < grst_delay; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to hw register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to hw */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to hw register space
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
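
/* Usage sketch (illustrative only; the field values and the 128-byte base
 * granularity are assumptions, not taken from this file): a caller fills
 * the sparse context and lets ice_write_rxq_ctx() pack and program it:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_phys_addr >> 7;
 *	rlan_ctx.qlen = ring_len;
 *	ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */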

#if !defined(NO_UNUSED_CTX_CODE) || defined(AE_DRIVER)
/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in hw register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}
#endif /* !NO_UNUSED_CTX_CODE || AE_DRIVER */

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
	{ 0 }
};

#if !defined(NO_UNUSED_CTX_CODE) || defined(AE_DRIVER)
/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion q context from dense structure to hw register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to hw */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};

/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to hw register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx((u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}

/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in hw register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}

/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell q context from dense structure to hw register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to hw */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};

/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to hw register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx((u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}

/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in hw register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}
#endif /* !NO_UNUSED_CTX_CODE || AE_DRIVER */

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
void
ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

	if (!(mask & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = LE16_TO_CPU(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  LE16_TO_CPU(cq_desc->opcode),
		  LE16_TO_CPU(cq_desc->flags),
		  LE16_TO_CPU(cq_desc->datalen), LE16_TO_CPU(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->cookie_high),
		  LE32_TO_CPU(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.param0),
		  LE32_TO_CPU(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}

/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the hw struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = LE32_TO_CPU(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = CPU_TO_LE32(ICE_AQC_DRIVER_UNLOADING);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_req_res");

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = CPU_TO_LE16(res);
	cmd_resp->access_type = CPU_TO_LE16(access);
	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
	cmd_resp->timeout = CPU_TO_LE32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_SUCCESS;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = LE32_TO_CPU(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Release common resource using the admin queue commands (0x0009).
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_release_res");

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = CPU_TO_LE16(res);
	cmd->res_number = CPU_TO_LE32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "ice_acquire_res");

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		ice_msec_delay(delay, true);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource id
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	ice_debug(hw, ICE_DBG_TRACE, "ice_release_res");

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		ice_msec_delay(1, true);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
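
/* Usage sketch (illustrative; ICE_NVM_RES_ID comes from the admin queue
 * definitions elsewhere in this codebase, and the 3000ms timeout is made up):
 * a typical caller brackets its work with acquire/release:
 *
 *	if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000)) {
 *		... access the shared resource ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 */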

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the hw struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_alloc_free_res");

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_entries = CPU_TO_LE16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the hw structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
			     ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
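
/* Worked example (illustrative, not from the original source): with a
 * valid_functions bitmap of 0x0F (four active PFs),
 * ice_get_num_per_func(hw, ICE_MAX_VSI) returns ICE_MAX_VSI / 4, i.e.
 * each PF is guaranteed a quarter of the device's VSIs.
 */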

/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = LE32_TO_CPU(cap_resp->logical_id);
		u32 phys_id = LE32_TO_CPU(cap_resp->phys_id);
		u32 number = LE32_TO_CPU(cap_resp->number);
		u16 cap = LE16_TO_CPU(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			caps->valid_functions = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Valid Functions = %d\n",
				  caps->valid_functions);
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guar_num_vsi =
					ice_get_num_per_func(hw, ICE_MAX_VSI);
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  number);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}

/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: Size of the virtual buffer
 * @cap_count: cap count needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, LE32_TO_CPU(cmd->count), opc);
	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
		*cap_count = LE32_TO_CPU(cmd->count);

	return status;
}

/**
 * ice_discover_caps - get info about the HW
 * @hw: pointer to the hardware structure
 * @opc: capabilities type to discover - pass in the command opcode
 */
static enum ice_status
ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
	enum ice_status status;
	u32 cap_count;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
	 * The driver then allocates the buffer based on the count and retries
	 * the operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cap_count = ICE_GET_CAP_BUF_COUNT;
	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf_len = (u16)(cap_count *
				 sizeof(struct ice_aqc_list_caps_elem));
		cbuf = ice_malloc(hw, cbuf_len);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
					      opc, NULL);
		ice_free(hw, cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with bigger buffer */
	} while (--retries);

	return status;
}
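
/* Worked example (illustrative, not from the original source): the first
 * pass allocates room for ICE_GET_CAP_BUF_COUNT (40) elements. If the device
 * actually holds, say, 50 capability records, that first call fails with
 * ICE_AQ_RC_ENOMEM and writes 50 back through cap_count, so the second (and
 * final) pass allocates 50 * sizeof(struct ice_aqc_list_caps_elem).
 */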

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
	if (!status)
		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);

	return status;
}
/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the hw struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;

	/* Prep values for flags, sah, sal */
	cmd->sah = HTONS(*((const u16 *)mac_addr));
	cmd->sal = HTONL(*((const u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

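/* Byte-layout sketch for the command above: for an address aa:bb:cc:dd:ee:ff,
 * sah carries the first two bytes (aa bb) and sal the remaining four
 * (cc dd ee ff), both in network byte order, so a caller only needs the
 * plain 6-byte array ('new_mac' here is a hypothetical name):
 *
 *	u8 new_mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
 *
 *	ice_aq_manage_mac_write(hw, new_mac, ICE_AQC_MAN_MAC_UPDATE_LAA_WOL,
 *				NULL);
 */
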
/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * This helper function will convert an entry in phy type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: In the structure of [phy_type_low, phy_type_high], there should
 * be one bit set, as this function will convert one phy type to its
 * speed.
 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

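/* Example: with exactly one phy type bit set,
 *
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR,
 *						     0);
 *
 * yields ICE_AQ_LINK_SPEED_25GB, while zero or multiple set bits yield
 * ICE_AQ_LINK_SPEED_UNKNOWN.
 */
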
/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap structure, you can check it at
 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in this [phy_type_low, phy_type_high] structure represents a
 * certain link speed. This helper function will turn on bits in
 * [phy_type_low, phy_type_high] based on the value of the
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
	u64 pt_high;
	u64 pt_low;
	int index;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

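/* Example: to target only 10G and 25G, start from zeroed masks and let the
 * helper turn on every phy type bit whose speed matches the bitmap:
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *
 * Afterwards phy_low has all 10G and 25G phy type bits set and phy_high is
 * still zero, since no high phy type maps to those speeds.
 */
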
/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the hw struct
 * @lport: logical port number
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	if (!cfg)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = lport;
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy_info;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	phy_info = &pi->phy;
	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		goto out;

	if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
					     pcaps, NULL);
		if (status)
			goto out;

		ice_memcpy(phy_info->link_info.module_type, &pcaps->module_type,
			   sizeof(phy_info->link_info.module_type),
			   ICE_NONDMA_TO_NONDMA);
	}
out:
	ice_free(hw, pcaps);

	return status;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current phy config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the old pause settings */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
	/* set the new capabilities */
	cfg.caps |= pause_mask;
	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
		/* Copy over all the old settings */
		cfg.phy_type_high = pcaps->phy_type_high;
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (status == ICE_SUCCESS)
				break;

			ice_msec_delay(100, true);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	ice_free(hw, pcaps);
	return status;
}

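/* Usage sketch (assumes a valid 'pi' for an initialized port): request
 * symmetric pause and use the failure code to see which AQ step failed.
 *
 *	u8 aq_fail = ICE_SET_FC_AQ_FAIL_NONE;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	if (ice_set_fc(pi, &aq_fail, true))
 *		ice_debug(pi->hw, ICE_DBG_LINK,
 *			  "set fc failed, aq_failures = %d\n", aq_fail);
 */
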
/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!caps || !cfg)
		return;

	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl = caps->low_power_ctrl;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 *
 * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
 * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
 * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
 */
void
ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
{
	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear auto FEC and RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
				     ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
				     ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear auto FEC and BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
				     ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear auto FEC and all FEC option bits. */
		cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		break;
	default:
		break;
	}
}

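/* Usage sketch: FEC selection is meant to start from a PHY caps snapshot so
 * only the FEC related fields change ('pcaps' is assumed to hold the result
 * of a prior ice_aq_get_phy_caps() call):
 *
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	ice_copy_phy_caps_to_cfg(pcaps, &cfg);
 *	ice_cfg_phy_fec(&cfg, ICE_FEC_RS);
 *	ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
 */
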
/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = ICE_SUCCESS;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK,
				  "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the hw struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = CPU_TO_LE16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the hw struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
enum ice_status
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 * @glob_lut_idx: global LUT index
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall-through */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = CPU_TO_LE16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, true);
}

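/* Usage sketch: spread flows round-robin over the first 'num_rxq' queues
 * with a 512-entry PF table ('num_rxq' is a hypothetical count):
 *
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *	u16 i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % num_rxq;
 *	ice_aq_set_rss_lut(hw, vsi_handle, ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *			   lut, sizeof(lut));
 */
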
/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum ice_status
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	u16 i, sum_header_size, sum_q_size = 0;
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_lan_txq");

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	sum_header_size = num_qgrps *
		(sizeof(*qg_list) - sizeof(*qg_list->txqs));

	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
		struct ice_aqc_add_txqs_perq *q = list->txqs;

		sum_q_size += list->num_txqs * sizeof(*q);
		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
	}

	if (buf_size != (sum_header_size + sum_q_size))
		return ICE_ERR_PARAM;

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the rst source
 * @vmvf_num: the relative vm or vf number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	ice_debug(hw, ICE_DBG_TRACE, "ice_aq_dis_lan_txq");
	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	for (i = 0; i < num_qgrps; ++i) {
		/* Calculate the size taken up by the queue IDs in this group */
		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);

		/* Add the size of the group header */
		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable Q %d failed %d\n",
				  LE16_TO_CPU(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

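/* Worked example of the size check above, for one group with two queue IDs:
 *
 *	sz  = 2 * sizeof(u16);                  // the queue IDs: 4 bytes
 *	sz += sizeof(struct ice_aqc_dis_txq_item) - sizeof(u16); // group header
 *	sz += 2;                                // even queue count: pad 2 bytes
 *
 * The padding keeps each group 4-byte aligned, and buf_size must match the
 * computed total exactly or the command is rejected with ICE_ERR_PARAM.
 */
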
/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);

	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);

	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);

	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return ICE_SUCCESS;
}

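/* Worked example of the packing performed above: a ce_info entry with
 * size_of = 1, offset = 0, width = 3 and lsb = 13 is written by
 * ice_write_byte() into destination byte 13 / 8 = 1 at bit offset
 * 13 % 8 = 5, so a source value of 0x5 is masked to three bits and stored
 * as 0x5 << 5 = 0xa0, leaving the other five bits of that byte untouched.
 */
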
/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: tc number
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN Tx queue.
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that :
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 * Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* add the lan q */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n",
			  LE16_TO_CPU(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;

	/* add a leaf node into scheduler tree q layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);

ena_txq_exit:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @num_queues: number of queues
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the rst source
 * @vmvf_num: the relative vm or vf number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
		u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	/* if queue is disabled already yet the disable queue command has to be
	 * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
	 * any queue information
	 */
	if (!num_queues && rst_src)
		return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
					  NULL);

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), rst_src, vmvf_num,
					    cd);

		if (status != ICE_SUCCESS)
			break;
		ice_free_sched_node(pi, node);
	}
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: lan or rdma
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = ICE_SUCCESS;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI lan queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max lan queues array per TC
 *
 * This function adds/updates the VSI lan queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the hw struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* In start of replay, move entries into replay_rules list, it
	 * will allow adding rules entries back to filt_rules list,
	 * which is operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return ice_sched_replay_tc_node_bw(hw);
}

/**
 * ice_replay_vsi - replay vsi configuration
 * @hw: pointer to the hw struct
 * @vsi_handle: driver vsi handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}

	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the hw struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @hireg: high 32 bit HW register to read from
 * @loreg: low 32 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
		  bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data;

	new_data = rd32(hw, loreg);
	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. So save the first values read and use them as
	 * offsets to be subtracted from the raw values in order to report stats
	 * that count from zero.
	 */
	if (!prev_stat_loaded)
		*prev_stat = new_data;
	if (new_data >= *prev_stat)
		*cur_stat = new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
	*cur_stat &= 0xFFFFFFFFFFULL;
}

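/* Rollover example for the 40-bit read above: with *prev_stat at
 * 0xFFFFFFFFF0 and a new reading of 0x10, the counter wrapped, so
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20 events are reported and the
 * final mask keeps the accumulated value within 40 bits.
 */
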
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. So save the first values read and use them as
	 * offsets to be subtracted from the raw values in order to report stats
	 * that count from zero.
	 */
	if (!prev_stat_loaded)
		*prev_stat = new_data;
	if (new_data >= *prev_stat)
		*cur_stat = new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
}

/**
 * ice_sched_query_elem - query element information from hw
 * @hw: pointer to the hw struct
 * @node_teid: node teid to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
	buf->generic[0].node_teid = CPU_TO_LE32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status != ICE_SUCCESS || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");