/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer. Please interpret the user
 * specified buffer as a "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_DMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_DMA_TO_NONDMA);
			break;
		}

	return ICE_SUCCESS;
}

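/* Example usage (an illustrative sketch, not part of the upstream flow):
 * read the station MAC once hw->port_info is set up, sizing the buffer for
 * the two (LAN and WoL) addresses a port may report:
 *
 *	struct ice_aqc_manage_mac_read_resp buf[2];
 *
 *	if (!ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL))
 *		;  (hw->port_info->mac.lan_addr now holds the LAN address)
 */
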
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "	extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
			   sizeof(pi->phy.link_info.module_type),
			   ICE_NONDMA_TO_NONDMA);
	}

	return status;
}

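/* Example usage (sketch): query the PHY capabilities currently configured
 * on the port. ICE_AQC_REPORT_SW_CFG reports the active configuration,
 * while ICE_AQC_REPORT_TOPO_CAP (used in ice_init_hw()) reports topology
 * capabilities:
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *
 *	if (!ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
 *				 &pcaps, NULL))
 *		;  (inspect pcaps.phy_type_low and pcaps.caps here)
 */
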
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}

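/* Example usage (sketch): refresh and test the link state; ICE_AQ_LINK_UP
 * is the link_info bit reported by firmware:
 *
 *	if (!ice_aq_get_link_info(pi, false, NULL, NULL) &&
 *	    (pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
 *		;  (link is up at pi->phy.link_info.link_speed)
 */
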
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and fc threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and fc
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

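/* Example usage (sketch): enable jumbo frame support at the MAC, exactly as
 * done later in ice_init_hw():
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */
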
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = (struct ice_switch_info *)
		ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
	if (status) {
		ice_free(hw, hw->switch_info);
		return status;
	}
	return ICE_SUCCESS;
}

/**
 * ice_cleanup_fltr_mgmt_single - clears single filter management struct
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	if (!sw)
		return;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_sw_replay_rule_info(hw, sw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_nvm_info *nvm = &hw->nvm;
	struct ice_orom_info *orom;

	orom = &nvm->orom;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
		ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);
	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

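/* Typical call sequence (sketch): ice_init_hw() runs once per PF at probe
 * time, and every successful ice_init_hw() must be balanced by
 * ice_deinit_hw() at remove time; how the caller maps the status to its own
 * error code is an assumption of the sketch:
 *
 *	if (ice_init_hw(hw))
 *		return caller_specific_error;
 *	...
 *	ice_deinit_hw(hw);
 */
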
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);

	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

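/* Example usage (sketch): request a core reset and wait for firmware to be
 * ready again; any non-PF reset also restores PXE mode, which the rebuild
 * flow must clear via ice_clear_pxe_mode():
 *
 *	if (ice_reset(hw, ICE_RESET_CORER))
 *		;  (reset failed; device is not usable)
 */
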
/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

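/* Each ICE_CTX_STORE(struct, field, width, lsb) entry describes how one
 * field of the sparse context struct is packed into the dense hardware
 * image: the field value is written as a little-endian bitfield of 'width'
 * bits starting at absolute bit position 'lsb'. For example, the 'qlen'
 * entry above places the 13-bit queue length at bits 89..101 of the Rx
 * queue context image that ice_copy_rxq_ctx_to_hw() writes out.
 */
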
/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

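/* Example usage (sketch): program an Rx queue context; only the fields set
 * here matter, the rest of the dense image stays zero. The names ring_dma,
 * ring_len and rx_buf_len are placeholders, and the 7-bit shifts assume the
 * hardware's 128-byte granularity for base address and buffer size:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;
 *	rlan_ctx.qlen = ring_len;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;
 *	if (ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index))
 *		;  (programming failed)
 */
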
/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};

/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}

/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}

/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};

/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
		    ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}

/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}

/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	if (hw->aq_send_cmd_fn) {
		enum ice_status status = ICE_ERR_NOT_READY;
		u16 retval = ICE_AQ_RC_OK;

		ice_acquire_lock(&hw->adminq.sq_lock);
		if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc,
					buf, buf_size)) {
			retval = LE16_TO_CPU(desc->retval);
			/* strip off FW internal code */
			if (retval)
				retval &= 0xff;
			if (retval == ICE_AQ_RC_OK)
				status = ICE_SUCCESS;
			else
				status = ICE_ERR_AQ_ERROR;
		}

		hw->adminq.sq_last_status = (enum ice_aq_err)retval;
		ice_release_lock(&hw->adminq.sq_lock);

		return status;
	}
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

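/* Example usage (sketch): a direct (bufferless) admin queue command is
 * issued by filling a default descriptor with the opcode and sending it;
 * this is the same pattern used by ice_clear_pf_cfg() above:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */
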
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = LE32_TO_CPU(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = CPU_TO_LE16(res);
	cmd_resp->access_type = CPU_TO_LE16(access);
	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
	cmd_resp->timeout = CPU_TO_LE32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_SUCCESS;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = LE32_TO_CPU(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Release a common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = CPU_TO_LE16(res);
	cmd->res_number = CPU_TO_LE32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		ice_msec_delay(delay, true);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			goto ice_acquire_res_exit;

		if (!status)
			/* lock acquired */
			goto ice_acquire_res_exit;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		ice_msec_delay(1, true);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

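/* Example usage (sketch): an acquire/release pair brackets any section that
 * touches a shared resource such as the NVM. ICE_NVM_RES_ID and
 * ICE_NVM_TIMEOUT are the identifiers the NVM code uses; the timeout is the
 * maximum hold time granted to this driver:
 *
 *	if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *			     ICE_NVM_TIMEOUT)) {
 *		(... read from NVM ...)
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 */
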
/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_entries = CPU_TO_LE16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = ice_struct_size(buf, elem, num);
	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = CPU_TO_LE16(num);
	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
		   ICE_NONDMA_TO_NONDMA);

ice_alloc_res_exit:
	ice_free(hw, buf);
	return status;
}

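/* Example usage (sketch): allocate a single resource of some type and
 * remember its index; 'type' stands for one of the ICE_AQC_RES_TYPE_*
 * values defined in ice_adminq_cmd.h and 'res_id' is a placeholder name:
 *
 *	u16 res_id;
 *
 *	if (!ice_alloc_hw_res(hw, type, 1, false, &res_id))
 *		;  (res_id now holds the allocated resource number)
 */
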
/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = ice_struct_size(buf, elem, num);
	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = CPU_TO_LE16(num);
	buf->res_type = CPU_TO_LE16(type);
	ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
		   ICE_NONDMA_TO_NONDMA);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	ice_free(hw, buf);
	return status;
}

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
			     ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}

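/* Worked example: with all 8 PFs enabled (valid_functions = 0xFF, so
 * funcs = 8) and max = ICE_MAX_VSI = 768, each PF is granted
 * 768 / 8 = 96 guaranteed VSIs.
 */
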
/**
 * ice_parse_common_caps - parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Returns: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
{
	u32 logical_id = LE32_TO_CPU(elem->logical_id);
	u32 phys_id = LE32_TO_CPU(elem->phys_id);
	u32 number = LE32_TO_CPU(elem->number);
	u16 cap = LE16_TO_CPU(elem->cap);
	bool found = true;

	switch (cap) {
	case ICE_AQC_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: valid_functions (bitmap) = %d\n", prefix,
			  caps->valid_functions);
		break;
	case ICE_AQC_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: dcb = %d\n", prefix, caps->dcb);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: active_tc_bitmap = %d\n", prefix,
			  caps->active_tc_bitmap);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: maxtc = %d\n", prefix, caps->maxtc);
		break;
	case ICE_AQC_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: rss_table_size = %d\n", prefix,
			  caps->rss_table_size);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: rss_table_entry_width = %d\n", prefix,
			  caps->rss_table_entry_width);
		break;
	case ICE_AQC_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: num_rxq = %d\n", prefix,
			  caps->num_rxq);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: rxq_first_id = %d\n", prefix,
			  caps->rxq_first_id);
		break;
	case ICE_AQC_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: num_txq = %d\n", prefix,
			  caps->num_txq);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: txq_first_id = %d\n", prefix,
			  caps->txq_first_id);
		break;
	case ICE_AQC_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: num_msix_vectors = %d\n", prefix,
			  caps->num_msix_vectors);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: msix_vector_first_id = %d\n", prefix,
			  caps->msix_vector_first_id);
		break;
	case ICE_AQC_CAPS_MAX_MTU:
		caps->max_mtu = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
			  prefix, caps->max_mtu);
		break;
	default:
		/* Not one of the recognized common capabilities */
		found = false;
		break;
	}

	return found;
}

/**
 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
 * @hw: pointer to the HW structure
 * @caps: pointer to capabilities structure to fix
 *
 * Re-calculate the capabilities that are dependent on the number of physical
 * ports; i.e. some features are not supported or function differently on
 * devices with more than 4 ports.
 */
static void
ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
{
	/* This assumes device capabilities are always scanned before function
	 * capabilities during the initialization flow.
	 */
	if (hw->dev_caps.num_funcs > 4) {
		/* Max 4 TCs per port */
		caps->maxtc = 4;
		ice_debug(hw, ICE_DBG_INIT,
			  "reducing maxtc to %d (based on #ports)\n",
			  caps->maxtc);
	}
}

/**
 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VSI.
 */
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			struct ice_aqc_list_caps_elem *cap)
{
	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
		  LE32_TO_CPU(cap->number));
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
		  func_p->guar_num_vsi);
}

1962 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
1963 * @hw: pointer to the HW struct
1964 * @func_p: pointer to function capabilities structure
1965 * @cap: pointer to the capability element to parse
1967 * Extract function capabilities for ICE_AQC_CAPS_FD.
1970 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1971 struct ice_aqc_list_caps_elem *cap)
1975 if (hw->dcf_enabled)
1977 reg_val = rd32(hw, GLQF_FD_SIZE);
1978 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
1979 GLQF_FD_SIZE_FD_GSIZE_S;
1980 func_p->fd_fltr_guar =
1981 ice_get_num_per_func(hw, val);
1982 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
1983 GLQF_FD_SIZE_FD_BSIZE_S;
1984 func_p->fd_fltr_best_effort = val;
1986 ice_debug(hw, ICE_DBG_INIT,
1987 "func caps: fd_fltr_guar = %d\n",
1988 func_p->fd_fltr_guar);
1989 ice_debug(hw, ICE_DBG_INIT,
1990 "func caps: fd_fltr_best_effort = %d\n",
1991 func_p->fd_fltr_best_effort);
1995 * ice_parse_func_caps - Parse function capabilities
1996 * @hw: pointer to the HW struct
1997 * @func_p: pointer to function capabilities structure
1998 * @buf: buffer containing the function capability records
1999 * @cap_count: the number of capabilities
2001 * Helper function to parse function (0x000A) capabilities list. For
2002 * capabilities shared between device and function, this relies on
2003 * ice_parse_common_caps.
2005 * Loop through the list of provided capabilities and extract the relevant
2006 * data into the function capabilities structure.
2009 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2010 void *buf, u32 cap_count)
2012 struct ice_aqc_list_caps_elem *cap_resp;
2015 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2017 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2019 for (i = 0; i < cap_count; i++) {
2020 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2023 found = ice_parse_common_caps(hw, &func_p->common_cap,
2024 &cap_resp[i], "func caps");
2027 case ICE_AQC_CAPS_VSI:
2028 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2030 case ICE_AQC_CAPS_FD:
2031 ice_parse_fdir_func_caps(hw, func_p, &cap_resp[i]);
2034 /* Don't list common capabilities as unknown */
2036 ice_debug(hw, ICE_DBG_INIT,
2037 "func caps: unknown capability[%d]: 0x%x\n",
2043 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2047 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2048 * @hw: pointer to the HW struct
2049 * @dev_p: pointer to device capabilities structure
2050 * @cap: capability element to parse
2052 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2055 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2056 struct ice_aqc_list_caps_elem *cap)
2058 u32 number = LE32_TO_CPU(cap->number);
2060 dev_p->num_funcs = ice_hweight32(number);
2061 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2066 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2067 * @hw: pointer to the HW struct
2068 * @dev_p: pointer to device capabilities structure
2069 * @cap: capability element to parse
2071 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2074 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2075 struct ice_aqc_list_caps_elem *cap)
2077 u32 number = LE32_TO_CPU(cap->number);
2079 dev_p->num_vsi_allocd_to_host = number;
2080 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2081 dev_p->num_vsi_allocd_to_host);
2085 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2086 * @hw: pointer to the HW struct
2087 * @dev_p: pointer to device capabilities structure
2088 * @cap: capability element to parse
2090 * Parse ICE_AQC_CAPS_FD for device capabilities.
2093 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2094 struct ice_aqc_list_caps_elem *cap)
2096 u32 number = LE32_TO_CPU(cap->number);
2098 dev_p->num_flow_director_fltr = number;
2099 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2100 dev_p->num_flow_director_fltr);
2104 * ice_parse_dev_caps - Parse device capabilities
2105 * @hw: pointer to the HW struct
2106 * @dev_p: pointer to device capabilities structure
2107 * @buf: buffer containing the device capability records
2108 * @cap_count: the number of capabilities
2110 * Helper function to parse device (0x000B) capabilities list. For
2111 * capabilities shared between device and function, this relies on
2112 * ice_parse_common_caps.
2114 * Loop through the list of provided capabilities and extract the relevant
2115 * data into the device capabilities structure.
2118 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2119 void *buf, u32 cap_count)
2121 struct ice_aqc_list_caps_elem *cap_resp;
2124 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2126 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2128 for (i = 0; i < cap_count; i++) {
2129 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2132 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2133 &cap_resp[i], "dev caps");
2136 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2137 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2139 case ICE_AQC_CAPS_VSI:
2140 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2142 case ICE_AQC_CAPS_FD:
2143 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2146 /* Don't list common capabilities as unknown */
2148 ice_debug(hw, ICE_DBG_INIT,
2149 "dev caps: unknown capability[%d]: 0x%x\n",
2155 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2159 * ice_aq_list_caps - query function/device capabilities
2160 * @hw: pointer to the HW struct
2161 * @buf: a buffer to hold the capabilities
2162 * @buf_size: size of the buffer
2163 * @cap_count: if not NULL, set to the number of capabilities reported
2164 * @opc: capabilities type to discover, device or function
2165 * @cd: pointer to command details structure or NULL
2167 * Get the function (0x000A) or device (0x000B) capabilities description from
2168 * firmware and store it in the buffer.
2170 * If the cap_count pointer is not NULL, then it is set to the number of
2171 * capabilities firmware will report. Note that if the buffer size is too
2172 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2173 * cap_count will still be updated in this case. It is recommended that the
2174 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2175 * firmware could return) to avoid this.
2177 static enum ice_status
2178 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2179 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2181 struct ice_aqc_list_caps *cmd;
2182 struct ice_aq_desc desc;
2183 enum ice_status status;
2185 cmd = &desc.params.get_cap;
2187 if (opc != ice_aqc_opc_list_func_caps &&
2188 opc != ice_aqc_opc_list_dev_caps)
2189 return ICE_ERR_PARAM;
2191 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2192 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2195 *cap_count = LE32_TO_CPU(cmd->count);
2201 * ice_discover_dev_caps - Read and extract device capabilities
2202 * @hw: pointer to the hardware structure
2203 * @dev_caps: pointer to device capabilities structure
2205 * Read the device capabilities and extract them into the dev_caps structure
 * for later use.
2208 static enum ice_status
2209 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2211 enum ice_status status;
2215 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2217 return ICE_ERR_NO_MEMORY;
2219 /* Although the driver doesn't know the number of capabilities the
2220 * device will return, we can simply send a 4KB buffer, the maximum
2221 * possible size that firmware can return.
2223 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2225 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2226 ice_aqc_opc_list_dev_caps, NULL);
2228 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2235 * ice_discover_func_caps - Read and extract function capabilities
2236 * @hw: pointer to the hardware structure
2237 * @func_caps: pointer to function capabilities structure
2239 * Read the function capabilities and extract them into the func_caps structure
 * for later use.
2242 static enum ice_status
2243 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2245 enum ice_status status;
2249 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2251 return ICE_ERR_NO_MEMORY;
2253 /* Although the driver doesn't know the number of capabilities the
2254 * device will return, we can simply send a 4KB buffer, the maximum
2255 * possible size that firmware can return.
2257 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2259 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2260 ice_aqc_opc_list_func_caps, NULL);
2262 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2269 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2270 * @hw: pointer to the hardware structure
2272 void ice_set_safe_mode_caps(struct ice_hw *hw)
2274 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2275 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2276 u32 valid_func, rxq_first_id, txq_first_id;
2277 u32 msix_vector_first_id, max_mtu;
2280 /* cache some func_caps values that should be restored after memset */
2281 valid_func = func_caps->common_cap.valid_functions;
2282 txq_first_id = func_caps->common_cap.txq_first_id;
2283 rxq_first_id = func_caps->common_cap.rxq_first_id;
2284 msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
2285 max_mtu = func_caps->common_cap.max_mtu;
2287 /* unset func capabilities */
2288 memset(func_caps, 0, sizeof(*func_caps));
2290 /* restore cached values */
2291 func_caps->common_cap.valid_functions = valid_func;
2292 func_caps->common_cap.txq_first_id = txq_first_id;
2293 func_caps->common_cap.rxq_first_id = rxq_first_id;
2294 func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2295 func_caps->common_cap.max_mtu = max_mtu;
2297 /* one Tx and one Rx queue in safe mode */
2298 func_caps->common_cap.num_rxq = 1;
2299 func_caps->common_cap.num_txq = 1;
2301 /* two MSIX vectors, one for traffic and one for misc causes */
2302 func_caps->common_cap.num_msix_vectors = 2;
2303 func_caps->guar_num_vsi = 1;
2305 /* cache some dev_caps values that should be restored after memset */
2306 valid_func = dev_caps->common_cap.valid_functions;
2307 txq_first_id = dev_caps->common_cap.txq_first_id;
2308 rxq_first_id = dev_caps->common_cap.rxq_first_id;
2309 msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
2310 max_mtu = dev_caps->common_cap.max_mtu;
2311 num_funcs = dev_caps->num_funcs;
2313 /* unset dev capabilities */
2314 memset(dev_caps, 0, sizeof(*dev_caps));
2316 /* restore cached values */
2317 dev_caps->common_cap.valid_functions = valid_func;
2318 dev_caps->common_cap.txq_first_id = txq_first_id;
2319 dev_caps->common_cap.rxq_first_id = rxq_first_id;
2320 dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2321 dev_caps->common_cap.max_mtu = max_mtu;
2322 dev_caps->num_funcs = num_funcs;
2324 /* one Tx and one Rx queue per function in safe mode */
2325 dev_caps->common_cap.num_rxq = num_funcs;
2326 dev_caps->common_cap.num_txq = num_funcs;
2328 /* two MSIX vectors per function */
2329 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2333 * ice_get_caps - get info about the HW
2334 * @hw: pointer to the hardware structure
2336 enum ice_status ice_get_caps(struct ice_hw *hw)
2338 enum ice_status status;
2340 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2344 return ice_discover_func_caps(hw, &hw->func_caps);
2348 * ice_aq_manage_mac_write - manage MAC address write command
2349 * @hw: pointer to the HW struct
2350 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2351 * @flags: flags to control write behavior
2352 * @cd: pointer to command details structure or NULL
2354 * This function is used to write MAC address to the NVM (0x0108).
2357 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2358 struct ice_sq_cd *cd)
2360 struct ice_aqc_manage_mac_write *cmd;
2361 struct ice_aq_desc desc;
2363 cmd = &desc.params.mac_write;
2364 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2367 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA);
2369 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2373 * ice_aq_clear_pxe_mode
2374 * @hw: pointer to the HW struct
2376 * Tell the firmware that the driver is taking over from PXE (0x0110).
2378 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2380 struct ice_aq_desc desc;
2382 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2383 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2385 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2389 * ice_clear_pxe_mode - clear pxe operations mode
2390 * @hw: pointer to the HW struct
2392 * Make sure all PXE mode settings are cleared, including things
2393 * like descriptor fetch/write-back mode.
2395 void ice_clear_pxe_mode(struct ice_hw *hw)
2397 if (ice_check_sq_alive(hw, &hw->adminq))
2398 ice_aq_clear_pxe_mode(hw);
2402 * ice_get_link_speed_based_on_phy_type - returns link speed
2403 * @phy_type_low: lower part of phy_type
2404 * @phy_type_high: higher part of phy_type
2406 * This helper function will convert an entry in PHY type structure
2407 * [phy_type_low, phy_type_high] to its corresponding link speed.
2408 * Note: In the structure of [phy_type_low, phy_type_high], there should
2409 * be one bit set, as this function will convert one PHY type to its
 * corresponding speed.
2411 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned.
2412 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned.
2415 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2417 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2418 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2420 switch (phy_type_low) {
2421 case ICE_PHY_TYPE_LOW_100BASE_TX:
2422 case ICE_PHY_TYPE_LOW_100M_SGMII:
2423 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2425 case ICE_PHY_TYPE_LOW_1000BASE_T:
2426 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2427 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2428 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2429 case ICE_PHY_TYPE_LOW_1G_SGMII:
2430 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2432 case ICE_PHY_TYPE_LOW_2500BASE_T:
2433 case ICE_PHY_TYPE_LOW_2500BASE_X:
2434 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2435 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2437 case ICE_PHY_TYPE_LOW_5GBASE_T:
2438 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2439 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2441 case ICE_PHY_TYPE_LOW_10GBASE_T:
2442 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2443 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2444 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2445 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2446 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2447 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2448 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2450 case ICE_PHY_TYPE_LOW_25GBASE_T:
2451 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2452 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2453 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2454 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2455 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2456 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2457 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2458 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2459 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2460 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2461 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2463 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2464 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2465 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2466 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2467 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2468 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2469 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2471 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2472 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2473 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2474 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2475 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2476 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2477 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2478 case ICE_PHY_TYPE_LOW_50G_AUI2:
2479 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2480 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2481 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2482 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2483 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2484 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2485 case ICE_PHY_TYPE_LOW_50G_AUI1:
2486 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2488 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2489 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2490 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2491 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2492 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2493 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2494 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2495 case ICE_PHY_TYPE_LOW_100G_AUI4:
2496 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2497 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2498 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2499 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2500 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2501 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2504 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2508 switch (phy_type_high) {
2509 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2510 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2511 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2512 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2513 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2514 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2517 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2521 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2522 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2523 return ICE_AQ_LINK_SPEED_UNKNOWN;
2524 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2525 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2526 return ICE_AQ_LINK_SPEED_UNKNOWN;
2527 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2528 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2529 return speed_phy_type_low;
2531 return speed_phy_type_high;
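/* Illustrative examples (assumed values, for demonstration only):
 *
 *	ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR, 0)
 *		returns ICE_AQ_LINK_SPEED_25GB;
 *	ice_get_link_speed_based_on_phy_type(0, ICE_PHY_TYPE_HIGH_100G_CAUI2)
 *		returns ICE_AQ_LINK_SPEED_100GB;
 *	a phy_type_low with two bits set matches no case above and yields
 *	ICE_AQ_LINK_SPEED_UNKNOWN.
 */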
2535 * ice_update_phy_type
2536 * @phy_type_low: pointer to the lower part of phy_type
2537 * @phy_type_high: pointer to the higher part of phy_type
2538 * @link_speeds_bitmap: targeted link speeds bitmap
2540 * Note: For the link_speeds_bitmap structure, you can check it at
2541 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2542 * link_speeds_bitmap that includes multiple speeds.
2544 * Each entry in the [phy_type_low, phy_type_high] structure represents
2545 * a certain link speed. This helper function turns on bits in
2546 * [phy_type_low, phy_type_high] based on the value of the
2547 * link_speeds_bitmap input parameter.
2550 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2551 u16 link_speeds_bitmap)
2558 /* We first check with low part of phy_type */
2559 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2560 pt_low = BIT_ULL(index);
2561 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2563 if (link_speeds_bitmap & speed)
2564 *phy_type_low |= BIT_ULL(index);
2567 /* We then check with high part of phy_type */
2568 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2569 pt_high = BIT_ULL(index);
2570 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2572 if (link_speeds_bitmap & speed)
2573 *phy_type_high |= BIT_ULL(index);
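/* Illustrative sketch (not part of the driver): building the PHY type
 * masks for a multi-speed request.
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *
 * Afterwards phy_low/phy_high have every PHY type bit whose link speed
 * is 10G or 25G; all other bits are left untouched.
 */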
2578 * ice_aq_set_phy_cfg
2579 * @hw: pointer to the HW struct
2580 * @pi: port info structure of the interested logical port
2581 * @cfg: structure with PHY configuration data to be set
2582 * @cd: pointer to command details structure or NULL
2584 * Set the various PHY configuration parameters supported on the Port.
2585 * One or more of the Set PHY config parameters may be ignored in an MFP
2586 * mode as the PF may not have the privilege to set some of the PHY Config
2587 * parameters. This status will be indicated by the command response (0x0601).
2590 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2591 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2593 struct ice_aq_desc desc;
2594 enum ice_status status;
2597 return ICE_ERR_PARAM;
2599 /* Ensure that only valid bits of cfg->caps can be turned on. */
2600 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2601 ice_debug(hw, ICE_DBG_PHY,
2602 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2605 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2608 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2609 desc.params.set_phy.lport_num = pi->lport;
2610 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2612 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2613 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2614 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2615 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2616 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2617 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2618 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2619 cfg->low_power_ctrl_an);
2620 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2621 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2622 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2625 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2627 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2628 status = ICE_SUCCESS;
2631 pi->phy.curr_user_phy_cfg = *cfg;
2637 * ice_update_link_info - update status of the HW network link
2638 * @pi: port info structure of the interested logical port
2640 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2642 struct ice_link_status *li;
2643 enum ice_status status;
2646 return ICE_ERR_PARAM;
2648 li = &pi->phy.link_info;
2650 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2654 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2655 struct ice_aqc_get_phy_caps_data *pcaps;
2659 pcaps = (struct ice_aqc_get_phy_caps_data *)
2660 ice_malloc(hw, sizeof(*pcaps));
2662 return ICE_ERR_NO_MEMORY;
2664 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2667 ice_free(hw, pcaps);
2674 * ice_cache_phy_user_req
2675 * @pi: port information structure
2676 * @cache_data: PHY logging data
2677 * @cache_mode: PHY logging mode
2679 * Log the user request on (FC, FEC, SPEED) for later use.
2682 ice_cache_phy_user_req(struct ice_port_info *pi,
2683 struct ice_phy_cache_mode_data cache_data,
2684 enum ice_phy_cache_mode cache_mode)
2689 switch (cache_mode) {
2691 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2693 case ICE_SPEED_MODE:
2694 pi->phy.curr_user_speed_req =
2695 cache_data.data.curr_user_speed_req;
2698 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2706 * ice_caps_to_fc_mode
2707 * @caps: PHY capabilities
2709 * Convert PHY FC capabilities to ice FC mode
2711 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2713 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2714 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2717 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2718 return ICE_FC_TX_PAUSE;
2720 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2721 return ICE_FC_RX_PAUSE;
2727 * ice_caps_to_fec_mode
2728 * @caps: PHY capabilities
2729 * @fec_options: Link FEC options
2731 * Convert PHY FEC capabilities to ice FEC mode
2733 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2735 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2736 return ICE_FEC_AUTO;
2738 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2739 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2740 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2741 ICE_AQC_PHY_FEC_25G_KR_REQ))
2742 return ICE_FEC_BASER;
2744 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2745 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2746 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2749 return ICE_FEC_NONE;
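/* Illustrative sketch (not part of the driver): deriving the current FC
 * and FEC modes from a pcaps buffer previously filled by
 * ice_aq_get_phy_caps.
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
 *						     pcaps->link_fec_options);
 *
 * With both pause bits set, fc is ICE_FC_FULL; with the auto-FEC bit
 * set, fec is ICE_FEC_AUTO regardless of the individual FEC options.
 */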
2752 static enum ice_status
2753 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2754 enum ice_fc_mode req_mode)
2756 struct ice_phy_cache_mode_data cache_data;
2757 u8 pause_mask = 0x0;
2760 return ICE_ERR_BAD_PTR;
2765 struct ice_aqc_get_phy_caps_data *pcaps;
2766 enum ice_status status;
2768 pcaps = (struct ice_aqc_get_phy_caps_data *)
2769 ice_malloc(pi->hw, sizeof(*pcaps));
2771 return ICE_ERR_NO_MEMORY;
2773 /* Query the value of FC that both the NIC and the attached media can support */
2776 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2779 ice_free(pi->hw, pcaps);
2783 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2784 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2786 ice_free(pi->hw, pcaps);
2790 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2791 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2793 case ICE_FC_RX_PAUSE:
2794 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2796 case ICE_FC_TX_PAUSE:
2797 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2803 /* clear the old pause settings */
2804 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2805 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2807 /* set the new capabilities */
2808 cfg->caps |= pause_mask;
2810 /* Cache user FC request */
2811 cache_data.data.curr_user_fc_req = req_mode;
2812 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2819 * @pi: port information structure
2820 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2821 * @ena_auto_link_update: enable automatic link update
2823 * Set the requested flow control mode.
2826 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2828 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2829 struct ice_aqc_get_phy_caps_data *pcaps;
2830 enum ice_status status;
2833 if (!pi || !aq_failures)
2834 return ICE_ERR_BAD_PTR;
2839 pcaps = (struct ice_aqc_get_phy_caps_data *)
2840 ice_malloc(hw, sizeof(*pcaps));
2842 return ICE_ERR_NO_MEMORY;
2844 /* Get the current PHY config */
2845 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2848 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2852 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2854 /* Configure the set PHY data */
2855 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2857 if (status != ICE_ERR_BAD_PTR)
2858 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2863 /* If the capabilities have changed, then set the new config */
2864 if (cfg.caps != pcaps->caps) {
2865 int retry_count, retry_max = 10;
2867 /* Auto restart link so settings take effect */
2868 if (ena_auto_link_update)
2869 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2871 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2873 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2877 /* Update the link info. It sometimes takes a really long time for
2878 * link to come back from the atomic reset, so wait a little and
2879 * retry. */
2882 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2883 status = ice_update_link_info(pi);
2885 if (status == ICE_SUCCESS)
2888 ice_msec_delay(100, true);
2892 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2896 ice_free(hw, pcaps);
2901 * ice_phy_caps_equals_cfg
2902 * @phy_caps: PHY capabilities
2903 * @phy_cfg: PHY configuration
2905 * Helper function to determine if PHY capabilities match PHY
 * configuration.
2909 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2910 struct ice_aqc_set_phy_cfg_data *phy_cfg)
2912 u8 caps_mask, cfg_mask;
2914 if (!phy_caps || !phy_cfg)
2917 /* These bits are not common between capabilities and configuration.
2918 * Do not use them to determine equality.
2920 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2921 ICE_AQC_PHY_EN_MOD_QUAL);
2922 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2924 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2925 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2926 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2927 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2928 phy_caps->eee_cap != phy_cfg->eee_cap ||
2929 phy_caps->eeer_value != phy_cfg->eeer_value ||
2930 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2937 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2938 * @pi: port information structure
2939 * @caps: PHY ability structure to copy data from
2940 * @cfg: PHY configuration structure to copy data to
2942 * Helper function to copy AQC PHY get ability data to the PHY set
 * configuration data structure.
2946 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
2947 struct ice_aqc_get_phy_caps_data *caps,
2948 struct ice_aqc_set_phy_cfg_data *cfg)
2950 if (!pi || !caps || !cfg)
2953 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
2954 cfg->phy_type_low = caps->phy_type_low;
2955 cfg->phy_type_high = caps->phy_type_high;
2956 cfg->caps = caps->caps;
2957 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
2958 cfg->eee_cap = caps->eee_cap;
2959 cfg->eeer_value = caps->eeer_value;
2960 cfg->link_fec_opt = caps->link_fec_options;
2961 cfg->module_compliance_enforcement =
2962 caps->module_compliance_enforcement;
2964 if (ice_fw_supports_link_override(pi->hw)) {
2965 struct ice_link_default_override_tlv tlv;
2967 if (ice_get_link_default_override(&tlv, pi))
2970 if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
2971 cfg->module_compliance_enforcement |=
2972 ICE_LINK_OVERRIDE_STRICT_MODE;
2977 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2978 * @pi: port information structure
2979 * @cfg: PHY configuration data to set FEC mode
2980 * @fec: FEC mode to configure
2983 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2984 enum ice_fec_mode fec)
2986 struct ice_aqc_get_phy_caps_data *pcaps;
2987 enum ice_status status = ICE_SUCCESS;
2991 return ICE_ERR_BAD_PTR;
2995 pcaps = (struct ice_aqc_get_phy_caps_data *)
2996 ice_malloc(hw, sizeof(*pcaps));
2998 return ICE_ERR_NO_MEMORY;
3000 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
3005 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3006 cfg->link_fec_opt = pcaps->link_fec_options;
3010 /* Clear RS bits, and AND BASE-R ability
3011 * bits and OR request bits.
3013 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3014 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3015 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3016 ICE_AQC_PHY_FEC_25G_KR_REQ;
3019 /* Clear BASE-R bits, and AND RS ability
3020 * bits and OR request bits.
3022 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3023 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3024 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3027 /* Clear all FEC option bits. */
3028 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3031 /* AND auto FEC bit, and all caps bits. */
3032 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3033 cfg->link_fec_opt |= pcaps->link_fec_options;
3036 status = ICE_ERR_PARAM;
3040 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
3041 struct ice_link_default_override_tlv tlv;
3043 if (ice_get_link_default_override(&tlv, pi))
3046 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3047 (tlv.options & ICE_LINK_OVERRIDE_EN))
3048 cfg->link_fec_opt = tlv.fec_options;
3052 ice_free(hw, pcaps);
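/* Illustrative sketch (not part of the driver): forcing RS-FEC on a
 * port, assuming cfg was primed via ice_copy_phy_caps_to_cfg.
 *
 *	status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
 *	if (!status)
 *		status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */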
3058 * ice_get_link_status - get status of the HW network link
3059 * @pi: port information structure
3060 * @link_up: pointer to bool (true/false = linkup/linkdown)
3062 * Variable link_up is true if link is up, false if link is down.
3063 * The variable link_up is invalid if status is non-zero. As a
3064 * result of this call, link status reporting becomes enabled.
3066 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3068 struct ice_phy_info *phy_info;
3069 enum ice_status status = ICE_SUCCESS;
3071 if (!pi || !link_up)
3072 return ICE_ERR_PARAM;
3074 phy_info = &pi->phy;
3076 if (phy_info->get_link_info) {
3077 status = ice_update_link_info(pi);
3080 ice_debug(pi->hw, ICE_DBG_LINK,
3081 "get link status error, status = %d\n",
3085 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
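/* Illustrative sketch (not part of the driver): a simple link poll.
 *
 *	bool link_up = false;
 *
 *	if (ice_get_link_status(pi, &link_up) == ICE_SUCCESS)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "link is %s\n",
 *			  link_up ? "up" : "down");
 */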
3091 * ice_aq_set_link_restart_an
3092 * @pi: pointer to the port information structure
3093 * @ena_link: if true: enable link, if false: disable link
3094 * @cd: pointer to command details structure or NULL
3096 * Sets up the link and restarts the Auto-Negotiation over the link.
3099 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3100 struct ice_sq_cd *cd)
3102 struct ice_aqc_restart_an *cmd;
3103 struct ice_aq_desc desc;
3105 cmd = &desc.params.restart_an;
3107 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3109 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3110 cmd->lport_num = pi->lport;
3112 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3114 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3116 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3120 * ice_aq_set_event_mask
3121 * @hw: pointer to the HW struct
3122 * @port_num: port number of the physical function
3123 * @mask: event mask to be set
3124 * @cd: pointer to command details structure or NULL
3126 * Set event mask (0x0613)
3129 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3130 struct ice_sq_cd *cd)
3132 struct ice_aqc_set_event_mask *cmd;
3133 struct ice_aq_desc desc;
3135 cmd = &desc.params.set_event_mask;
3137 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3139 cmd->lport_num = port_num;
3141 cmd->event_mask = CPU_TO_LE16(mask);
3142 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3146 * ice_aq_set_mac_loopback
3147 * @hw: pointer to the HW struct
3148 * @ena_lpbk: Enable or Disable loopback
3149 * @cd: pointer to command details structure or NULL
3151 * Enable/disable loopback on a given port
3154 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3156 struct ice_aqc_set_mac_lb *cmd;
3157 struct ice_aq_desc desc;
3159 cmd = &desc.params.set_mac_lb;
3161 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3163 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3165 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3169 * ice_aq_set_port_id_led
3170 * @pi: pointer to the port information
3171 * @is_orig_mode: is this LED set to original mode (by the net-list)
3172 * @cd: pointer to command details structure or NULL
3174 * Set LED value for the given port (0x06e9)
3177 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3178 struct ice_sq_cd *cd)
3180 struct ice_aqc_set_port_id_led *cmd;
3181 struct ice_hw *hw = pi->hw;
3182 struct ice_aq_desc desc;
3184 cmd = &desc.params.set_port_id_led;
3186 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3189 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3191 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3193 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3197 * ice_aq_sff_eeprom
3198 * @hw: pointer to the HW struct
3199 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3200 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3201 * @mem_addr: I2C offset. Lower 8 bits for address; upper 8 bits must be zero.
3202 * @page: QSFP page
3203 * @set_page: set or ignore the page
3204 * @data: pointer to data buffer to be read/written to the I2C device.
3205 * @length: 1-16 for read, 1 for write.
3206 * @write: 0 for read, 1 for write.
3207 * @cd: pointer to command details structure or NULL
3209 * Read/Write SFF EEPROM (0x06EE)
3212 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3213 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3214 bool write, struct ice_sq_cd *cd)
3216 struct ice_aqc_sff_eeprom *cmd;
3217 struct ice_aq_desc desc;
3218 enum ice_status status;
3220 if (!data || (mem_addr & 0xff00))
3221 return ICE_ERR_PARAM;
3223 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3224 cmd = &desc.params.read_write_sff_param;
3225 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
3226 cmd->lport_num = (u8)(lport & 0xff);
3227 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3228 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3229 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3231 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3232 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3233 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3234 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3236 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3238 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
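/* Illustrative sketch (not part of the driver): read the SFF identifier
 * byte at offset 0 of the module EEPROM on the default port. 0xA0 is
 * the conventional I2C address for SFP/QSFP module EEPROMs.
 *
 *	u8 id = 0;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x00, 0, 0, &id, 1,
 *				   false, NULL);
 */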
3243 * __ice_aq_get_set_rss_lut
3244 * @hw: pointer to the hardware structure
3245 * @vsi_id: VSI FW index
3246 * @lut_type: LUT table type
3247 * @lut: pointer to the LUT buffer provided by the caller
3248 * @lut_size: size of the LUT buffer
3249 * @glob_lut_idx: global LUT index
3250 * @set: set true to set the table, false to get the table
3252 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3254 static enum ice_status
3255 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
3256 u16 lut_size, u8 glob_lut_idx, bool set)
3258 struct ice_aqc_get_set_rss_lut *cmd_resp;
3259 struct ice_aq_desc desc;
3260 enum ice_status status;
3263 cmd_resp = &desc.params.get_set_rss_lut;
3266 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3267 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3269 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3272 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3273 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3274 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3275 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3278 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3279 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3280 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3281 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3282 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3285 status = ICE_ERR_PARAM;
3286 goto ice_aq_get_set_rss_lut_exit;
3289 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3290 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3291 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3294 goto ice_aq_get_set_rss_lut_send;
3295 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3297 goto ice_aq_get_set_rss_lut_send;
3299 goto ice_aq_get_set_rss_lut_send;
3302 /* LUT size is only valid for Global and PF table types */
3304 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3305 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3306 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3307 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3309 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3310 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3311 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3312 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3314 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3315 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3316 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3317 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3318 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3323 status = ICE_ERR_PARAM;
3324 goto ice_aq_get_set_rss_lut_exit;
3327 ice_aq_get_set_rss_lut_send:
3328 cmd_resp->flags = CPU_TO_LE16(flags);
3329 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3331 ice_aq_get_set_rss_lut_exit:
3336 * ice_aq_get_rss_lut
3337 * @hw: pointer to the hardware structure
3338 * @vsi_handle: software VSI handle
3339 * @lut_type: LUT table type
3340 * @lut: pointer to the LUT buffer provided by the caller
3341 * @lut_size: size of the LUT buffer
3343 * get the RSS lookup table, PF or VSI type
3346 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3347 u8 *lut, u16 lut_size)
3349 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3350 return ICE_ERR_PARAM;
3352 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3353 lut_type, lut, lut_size, 0, false);
3357 * ice_aq_set_rss_lut
3358 * @hw: pointer to the hardware structure
3359 * @vsi_handle: software VSI handle
3360 * @lut_type: LUT table type
3361 * @lut: pointer to the LUT buffer provided by the caller
3362 * @lut_size: size of the LUT buffer
3364 * set the RSS lookup table, PF or VSI type
3367 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3368 u8 *lut, u16 lut_size)
3370 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3371 return ICE_ERR_PARAM;
3373 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3374 lut_type, lut, lut_size, 0, true);
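/* Illustrative sketch (not part of the driver): program a 512-entry PF
 * LUT that spreads flows round-robin across num_qs queues (num_qs is
 * assumed to be provided by the caller).
 *
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *	u16 i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = (u8)(i % num_qs);
 *	status = ice_aq_set_rss_lut(hw, vsi_handle,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *				    lut, sizeof(lut));
 */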
3378 * __ice_aq_get_set_rss_key
3379 * @hw: pointer to the HW struct
3380 * @vsi_id: VSI FW index
3381 * @key: pointer to key info struct
3382 * @set: set true to set the key, false to get the key
3384 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3387 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3388 struct ice_aqc_get_set_rss_keys *key,
3391 struct ice_aqc_get_set_rss_key *cmd_resp;
3392 u16 key_size = sizeof(*key);
3393 struct ice_aq_desc desc;
3395 cmd_resp = &desc.params.get_set_rss_key;
3398 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3399 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3401 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3404 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3405 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3406 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3407 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3409 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3413 * ice_aq_get_rss_key
3414 * @hw: pointer to the HW struct
3415 * @vsi_handle: software VSI handle
3416 * @key: pointer to key info struct
3418 * get the RSS key per VSI
3421 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3422 struct ice_aqc_get_set_rss_keys *key)
3424 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3425 return ICE_ERR_PARAM;
3427 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3432 * ice_aq_set_rss_key
3433 * @hw: pointer to the HW struct
3434 * @vsi_handle: software VSI handle
3435 * @keys: pointer to key info struct
3437 * set the RSS key per VSI
3440 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3441 struct ice_aqc_get_set_rss_keys *keys)
3443 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3444 return ICE_ERR_PARAM;
3446 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
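/* Illustrative sketch (not part of the driver): set a VSI RSS key from
 * caller-provided key material ("seed" and the standard_rss_key field
 * name are assumptions here).
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *
 *	ice_memcpy(keys.standard_rss_key, seed,
 *		   sizeof(keys.standard_rss_key), ICE_NONDMA_TO_NONDMA);
 *	status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */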
3451 * ice_aq_add_lan_txq
3452 * @hw: pointer to the hardware structure
3453 * @num_qgrps: Number of added queue groups
3454 * @qg_list: list of queue groups to be added
3455 * @buf_size: size of buffer for indirect command
3456 * @cd: pointer to command details structure or NULL
3458 * Add Tx LAN queue (0x0C30)
3461 * Prior to calling add Tx LAN queue:
3462 * Initialize the following as part of the Tx queue context:
3463 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3464 * Cache profile and Packet shaper profile.
3466 * After add Tx LAN queue AQ command is completed:
3467 * Interrupts should be associated with specific queues,
3468 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
3472 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3473 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3474 struct ice_sq_cd *cd)
3476 struct ice_aqc_add_tx_qgrp *list;
3477 struct ice_aqc_add_txqs *cmd;
3478 struct ice_aq_desc desc;
3479 u16 i, sum_size = 0;
3481 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3483 cmd = &desc.params.add_txqs;
3485 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3488 return ICE_ERR_PARAM;
3490 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3491 return ICE_ERR_PARAM;
3493 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3494 sum_size += ice_struct_size(list, txqs, list->num_txqs);
3495 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3499 if (buf_size != sum_size)
3500 return ICE_ERR_PARAM;
3502 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3504 cmd->num_qgrps = num_qgrps;
3506 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
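/* Illustrative sketch (not part of the driver): sizing the buffer for a
 * single queue group with n Tx queues, matching the validation above.
 *
 *	u16 buf_size = ice_struct_size(qg_list, txqs, n);
 *
 *	qg_list->num_txqs = n;
 *	status = ice_aq_add_lan_txq(hw, 1, qg_list, buf_size, NULL);
 */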
3510 * ice_aq_dis_lan_txq
3511 * @hw: pointer to the hardware structure
3512 * @num_qgrps: number of groups in the list
3513 * @qg_list: the list of groups to disable
3514 * @buf_size: the total size of the qg_list buffer in bytes
3515 * @rst_src: if called due to reset, specifies the reset source
3516 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3517 * @cd: pointer to command details structure or NULL
3519 * Disable LAN Tx queue (0x0C31)
3521 static enum ice_status
3522 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3523 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3524 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3525 struct ice_sq_cd *cd)
3527 struct ice_aqc_dis_txq_item *item;
3528 struct ice_aqc_dis_txqs *cmd;
3529 struct ice_aq_desc desc;
3530 enum ice_status status;
3533 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3534 cmd = &desc.params.dis_txqs;
3535 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3537 /* qg_list can be NULL only in VM/VF reset flow */
3538 if (!qg_list && !rst_src)
3539 return ICE_ERR_PARAM;
3541 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3542 return ICE_ERR_PARAM;
3544 cmd->num_entries = num_qgrps;
3546 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3547 ICE_AQC_Q_DIS_TIMEOUT_M);
3551 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3552 cmd->vmvf_and_timeout |=
3553 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3560 /* flush pipe on time out */
3561 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3562 /* If no queue group info, we are in a reset flow. Issue the AQ */
3566 /* set RD bit to indicate that command buffer is provided by the driver
3567 * and it needs to be read by the firmware
3569 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3571 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3572 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
3574 /* If the num of queues is even, add 2 bytes of padding */
3575 if ((item->num_qs % 2) == 0)
3576 item_size += 2;
3580 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3584 return ICE_ERR_PARAM;
3587 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3590 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3591 vmvf_num, hw->adminq.sq_last_status);
3593 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3594 LE16_TO_CPU(qg_list[0].q_id[0]),
3595 hw->adminq.sq_last_status);
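/* Worked example (values assumed for illustration): a group with
 * item->num_qs == 2 occupies ice_struct_size(item, q_id, 2) + 2 bytes
 * of qg_list, since an even queue count gets 2 bytes of padding; a
 * group with 3 queues needs no padding.
 */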
3601 * ice_aq_move_recfg_lan_txq
3602 * @hw: pointer to the hardware structure
3603 * @num_qs: number of queues to move/reconfigure
3604 * @is_move: true if this operation involves node movement
3605 * @is_tc_change: true if this operation involves a TC change
3606 * @subseq_call: true if this operation is a subsequent call
3607 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3608 * @timeout: timeout in units of 100 usec (valid values 0-50)
3609 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3610 * @buf: struct containing src/dest TEID and per-queue info
3611 * @buf_size: size of buffer for indirect command
3612 * @txqs_moved: out param, number of queues successfully moved
3613 * @cd: pointer to command details structure or NULL
3615 * Move / Reconfigure Tx LAN queues (0x0C32)
3618 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3619 bool is_tc_change, bool subseq_call, bool flush_pipe,
3620 u8 timeout, u32 *blocked_cgds,
3621 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3622 u8 *txqs_moved, struct ice_sq_cd *cd)
3624 struct ice_aqc_move_txqs *cmd;
3625 struct ice_aq_desc desc;
3626 enum ice_status status;
3628 cmd = &desc.params.move_txqs;
3629 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3631 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3632 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3633 return ICE_ERR_PARAM;
3635 if (is_tc_change && !flush_pipe && !blocked_cgds)
3636 return ICE_ERR_PARAM;
3638 if (!is_move && !is_tc_change)
3639 return ICE_ERR_PARAM;
3641 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3644 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3647 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3650 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3653 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3655 cmd->num_qs = num_qs;
3656 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3657 ICE_AQC_Q_CMD_TIMEOUT_M);
3659 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3661 if (!status && txqs_moved)
3662 *txqs_moved = cmd->num_qs;
3664 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3665 is_tc_change && !flush_pipe)
3666 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3671 /* End of FW Admin Queue command wrappers */
3674 * ice_write_byte - write a byte to a packed context structure
3675 * @src_ctx: the context structure to read from
3676 * @dest_ctx: the context to be written to
3677 * @ce_info: a description of the struct to be filled
3680 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3682 u8 src_byte, dest_byte, mask;
3686 /* copy from the next struct field */
3687 from = src_ctx + ce_info->offset;
3689 /* prepare the bits and mask */
3690 shift_width = ce_info->lsb % 8;
3691 mask = (u8)(BIT(ce_info->width) - 1);
3696 /* shift to correct alignment */
3697 mask <<= shift_width;
3698 src_byte <<= shift_width;
3700 /* get the current bits from the target bit string */
3701 dest = dest_ctx + (ce_info->lsb / 8);
3703 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3705 dest_byte &= ~mask; /* get the bits not changing */
3706 dest_byte |= src_byte; /* add in the new bits */
3708 /* put it all back */
3709 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
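/* Worked example (values assumed for illustration): a field with
 * ce_info->lsb == 13 and ce_info->width == 3 lands in destination byte
 * 13 / 8 == 1 with shift_width == 13 % 8 == 5, so mask == 0x07 << 5 ==
 * 0xE0 and only those three bits of the target byte are replaced.
 */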
3713 * ice_write_word - write a word to a packed context structure
3714 * @src_ctx: the context structure to read from
3715 * @dest_ctx: the context to be written to
3716 * @ce_info: a description of the struct to be filled
3719 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3726 /* copy from the next struct field */
3727 from = src_ctx + ce_info->offset;
3729 /* prepare the bits and mask */
3730 shift_width = ce_info->lsb % 8;
3731 mask = BIT(ce_info->width) - 1;
3733 /* don't swizzle the bits until after the mask because the mask bits
3734 * will be in a different bit position on big endian machines
3736 src_word = *(u16 *)from;
3739 /* shift to correct alignment */
3740 mask <<= shift_width;
3741 src_word <<= shift_width;
3743 /* get the current bits from the target bit string */
3744 dest = dest_ctx + (ce_info->lsb / 8);
3746 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3748 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
3749 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
3751 /* put it all back */
3752 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3756 * ice_write_dword - write a dword to a packed context structure
3757 * @src_ctx: the context structure to read from
3758 * @dest_ctx: the context to be written to
3759 * @ce_info: a description of the struct to be filled
3762 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3764 u32 src_dword, mask;
3769 /* copy from the next struct field */
3770 from = src_ctx + ce_info->offset;
3772 /* prepare the bits and mask */
3773 shift_width = ce_info->lsb % 8;
3775 /* if the field width is exactly 32 on an x86 machine, then the shift
3776 * operation will not work because the SHL instruction's count is masked
3777 * to 5 bits, so the shift will do nothing
3779 if (ce_info->width < 32)
3780 mask = BIT(ce_info->width) - 1;
3784 /* don't swizzle the bits until after the mask because the mask bits
3785 * will be in a different bit position on big endian machines
3787 src_dword = *(u32 *)from;
3790 /* shift to correct alignment */
3791 mask <<= shift_width;
3792 src_dword <<= shift_width;
3794 /* get the current bits from the target bit string */
3795 dest = dest_ctx + (ce_info->lsb / 8);
3797 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3799 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
3800 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
3802 /* put it all back */
3803 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3807 * ice_write_qword - write a qword to a packed context structure
3808 * @src_ctx: the context structure to read from
3809 * @dest_ctx: the context to be written to
3810 * @ce_info: a description of the struct to be filled
3813 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3815 u64 src_qword, mask;
3820 /* copy from the next struct field */
3821 from = src_ctx + ce_info->offset;
3823 /* prepare the bits and mask */
3824 shift_width = ce_info->lsb % 8;
3826 /* if the field width is exactly 64 on an x86 machine, then the shift
3827 * operation will not work because the SHL instruction's count is masked
3828 * to 6 bits, so the shift will do nothing
3830 if (ce_info->width < 64)
3831 mask = BIT_ULL(ce_info->width) - 1;
3835 /* don't swizzle the bits until after the mask because the mask bits
3836 * will be in a different bit position on big endian machines
3838 src_qword = *(u64 *)from;
3841 /* shift to correct alignment */
3842 mask <<= shift_width;
3843 src_qword <<= shift_width;
3845 /* get the current bits from the target bit string */
3846 dest = dest_ctx + (ce_info->lsb / 8);
3848 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
3850 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
3851 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
3853 /* put it all back */
3854 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3858 * ice_set_ctx - set context bits in packed structure
3859 * @hw: pointer to the hardware structure
3860 * @src_ctx: pointer to a generic non-packed context structure
3861 * @dest_ctx: pointer to memory for the packed structure
3862 * @ce_info: a description of the structure to be transformed
3865 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
3866 const struct ice_ctx_ele *ce_info)
3870 for (f = 0; ce_info[f].width; f++) {
3871 /* We have to deal with each element of the FW response
3872 * using the correct size so that we are correct regardless
3873 * of the endianness of the machine.
3875 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
3876 ice_debug(hw, ICE_DBG_QCTX,
3877 "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
3878 f, ce_info[f].width, ce_info[f].size_of);
3881 switch (ce_info[f].size_of) {
3883 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3886 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3889 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3892 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3895 return ICE_ERR_INVAL_SIZE;
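/* Illustrative sketch (not part of this function): packing a Tx LAN
 * queue context with the driver's ice_tlan_ctx_info descriptor table;
 * tlan_ctx field values, q_base, and qg_buf are assumed here.
 *
 *	struct ice_tlan_ctx tlan_ctx = { 0 };
 *
 *	tlan_ctx.base = q_base;
 *	status = ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
 *			     ice_tlan_ctx_info);
 */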
3903 * ice_read_byte - read context byte into struct
3904 * @src_ctx: the context structure to read from
3905 * @dest_ctx: the context to be written to
3906 * @ce_info: a description of the struct to be filled
3909 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3915 /* prepare the bits and mask */
3916 shift_width = ce_info->lsb % 8;
3917 mask = (u8)(BIT(ce_info->width) - 1);
3919 /* shift to correct alignment */
3920 mask <<= shift_width;
3922 /* get the current bits from the src bit string */
3923 src = src_ctx + (ce_info->lsb / 8);
3925 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3927 dest_byte &= ~(mask);
3929 dest_byte >>= shift_width;
3931 /* get the address from the struct field */
3932 target = dest_ctx + ce_info->offset;
3934 /* put it back in the struct */
3935 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3939 * ice_read_word - read context word into struct
3940 * @src_ctx: the context structure to read from
3941 * @dest_ctx: the context to be written to
3942 * @ce_info: a description of the struct to be filled
3945 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3947 u16 dest_word, mask;
3952 /* prepare the bits and mask */
3953 shift_width = ce_info->lsb % 8;
3954 mask = BIT(ce_info->width) - 1;
3956 /* shift to correct alignment */
3957 mask <<= shift_width;
3959 /* get the current bits from the src bit string */
3960 src = src_ctx + (ce_info->lsb / 8);
3962 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
3964 /* the data in memory is stored as little endian, so mask it correctly */
3967 src_word &= ~(CPU_TO_LE16(mask));
3969 /* get the data back into host order before shifting */
3970 dest_word = LE16_TO_CPU(src_word);
3972 dest_word >>= shift_width;
3974 /* get the address from the struct field */
3975 target = dest_ctx + ce_info->offset;
3977 /* put it back in the struct */
3978 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3982 * ice_read_dword - read context dword into struct
3983 * @src_ctx: the context structure to read from
3984 * @dest_ctx: the context to be written to
3985 * @ce_info: a description of the struct to be filled
3988 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3990 u32 dest_dword, mask;
3995 /* prepare the bits and mask */
3996 shift_width = ce_info->lsb % 8;
3998 /* if the field width is exactly 32 on an x86 machine, then the shift
3999 * operation will not work because the SHL instruction's count is masked
4000 * to 5 bits, so the shift will do nothing
4002 if (ce_info->width < 32)
4003 mask = BIT(ce_info->width) - 1;
4007 /* shift to correct alignment */
4008 mask <<= shift_width;
4010 /* get the current bits from the src bit string */
4011 src = src_ctx + (ce_info->lsb / 8);
4013 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4015 /* the data in the memory is stored as little endian so mask it
4018 src_dword &= ~(CPU_TO_LE32(mask));
4020 /* get the data back into host order before shifting */
4021 dest_dword = LE32_TO_CPU(src_dword);
4023 dest_dword >>= shift_width;
4025 /* get the address from the struct field */
4026 target = dest_ctx + ce_info->offset;
4028 /* put it back in the struct */
4029 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
/**
 * ice_read_qword - read context qword into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u64 dest_qword, mask;
	u8 *src, *target;
	__le64 src_qword;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_qword &= CPU_TO_LE64(mask);

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
}
/**
 * ice_get_ctx - extract context bits from a packed structure
 * @src_ctx: pointer to a generic packed context structure
 * @dest_ctx: pointer to a generic non-packed context structure
 * @ce_info: a description of the structure to be read from
 */
enum ice_status
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		switch (ce_info[f].size_of) {
		case 1:
			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 2:
			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 4:
			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 8:
			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return ICE_SUCCESS;
}
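/* Example (illustrative sketch): unpacking is the inverse of the packing
 * shown earlier; this reuses the hypothetical ice_example_ctx_info[] table.
 * After the call, ctx->q_len and ctx->q_ena hold the field values extracted
 * from the packed little-endian buffer.
 */
static enum ice_status
ice_example_unpack(u8 *packed_buf, struct ice_example_ctx *ctx)
{
	return ice_get_ctx(packed_buf, (u8 *)ctx, ice_example_ctx_info);
}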
/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}
/**
 * ice_ena_vsi_txq - add/enable a LAN Tx queue
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue.
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 * Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  LE16_TO_CPU(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	ice_release_lock(&pi->sched_lock);
	return status;
}
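/* Usage sketch (hypothetical caller, values made up): enable a single LAN
 * Tx queue on TC 0. The queue context in the buffer is assumed to have been
 * packed separately (e.g. with ice_set_ctx()) before this call.
 */
static enum ice_status
ice_example_ena_txq(struct ice_port_info *pi, u16 vsi_handle)
{
	struct ice_aqc_add_tx_qgrp qg = { 0 };

	qg.num_txqs = 1;
	qg.txqs[0].txq_id = CPU_TO_LE16(0); /* hypothetical queue ID */

	return ice_ena_vsi_txq(pi, vsi_handle, 0 /* tc */, 0 /* q_handle */,
			       1 /* num_qgrps */, &qg, sizeof(qg), NULL);
}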
/**
 * ice_dis_vsi_txq - disable LAN Tx queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB.
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if queue is disabled already yet the disable queue command
		 * has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	buf_size = ice_struct_size(qg_list, q_id, 1);
	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status != ICE_SUCCESS)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	ice_release_lock(&pi->sched_lock);
	ice_free(hw, qg_list);
	return status;
}
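/* Usage sketch (hypothetical caller): tear down the single queue enabled in
 * the sketch above. The TEID normally comes from the q_ctx recorded at
 * enable time; the handle, ID, and TEID here are placeholders.
 */
static enum ice_status
ice_example_dis_txq(struct ice_port_info *pi, u16 vsi_handle, u32 q_teid)
{
	u16 q_handle = 0, q_id = 0;

	return ice_dis_vsi_txq(pi, vsi_handle, 0 /* tc */, 1 /* num_queues */,
			       &q_handle, &q_id, &q_teid, ICE_NO_RESET,
			       0 /* vmvf_num */, NULL);
}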
/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = ICE_SUCCESS;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}
/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}
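/* Usage sketch (hypothetical caller): request up to 16 LAN queues on TC 0
 * only. The max-queue array carries one entry per traffic class; TCs whose
 * bit is clear in the bitmap are passed to ice_sched_cfg_vsi() as disabled.
 */
static enum ice_status
ice_example_cfg_lan(struct ice_port_info *pi, u16 vsi_handle)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 16 };

	return ice_cfg_vsi_lan(pi, vsi_handle, BIT(0) /* TC 0 */, max_lanqs);
}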
/**
 * ice_is_main_vsi - checks whether the VSI is main VSI
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * Checks whether the VSI is the main VSI (the first PF VSI created on
 * this PF).
 */
static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
}
/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function initializes filters
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
{
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_sw_replay_rule_info(hw, sw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows adding rule entries back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return ice_sched_replay_tc_node_bw(hw->port_info);
}
/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_port_info *pi = hw->port_info;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (ice_is_main_vsi(hw, vsi_handle)) {
		status = ice_replay_pre_init(hw, sw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}
/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}
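/* Usage sketch (hypothetical reset path): per the note on ice_replay_vsi(),
 * the main VSI must be replayed first, followed by the other valid VSIs,
 * with ice_replay_post() run once at the end.
 */
static void ice_example_replay_all(struct ice_hw *hw)
{
	u16 i;

	(void)ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
	for (i = 0; i < ICE_MAX_VSI; i++)
		if (i != ICE_MAIN_VSI_HANDLE && ice_is_vsi_valid(hw, i))
			(void)ice_replay_vsi(hw, i);
	ice_replay_post(hw);
}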
/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
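/* Worked example of the roll-over branch above: with prev_stat at
 * 0xFF_FFFF_FFFE (two below the 40-bit limit) and a new reading of 0x3,
 * new_data < *prev_stat, so the hardware counter wrapped. The adjusted
 * delta (0x3 + BIT_ULL(40)) - 0xFF_FFFF_FFFE = 5 is exactly the number of
 * increments that occurred through the wrap, and that is what gets added
 * to the software counter.
 */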
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
/**
 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
 * @hw: ptr to the hardware info
 * @vsi_handle: VSI handle
 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
 * @cur_stats: ptr to current stats structure
 *
 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
 * thus cannot be read using the normal ice_stat_update32 function.
 *
 * Read the GLV_REPC register associated with the given VSI, and update the
 * rx_no_desc and rx_errors values in the ice_eth_stats structure.
 *
 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
 * cleared each time it's read.
 *
 * Note that the GLV_RDPC register also counts the causes that would trigger
 * GLV_REPC. However, it does not give the finer grained detail about why the
 * packets are being dropped. The GLV_REPC values can be used to distinguish
 * whether Rx packets are dropped due to errors or due to no available
 * descriptors.
 */
void
ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
		     struct ice_eth_stats *cur_stats)
{
	u16 vsi_num, no_desc, error_cnt;
	u32 repc;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return;

	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	/* If we haven't loaded stats yet, just clear the current value */
	if (!prev_stat_loaded) {
		wr32(hw, GLV_REPC(vsi_num), 0);
		return;
	}

	repc = rd32(hw, GLV_REPC(vsi_num));
	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;

	/* Clear the count by writing to the stats register */
	wr32(hw, GLV_REPC(vsi_num), 0);

	cur_stats->rx_no_desc += no_desc;
	cur_stats->rx_errors += error_cnt;
}
/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to hold the element information
 *
 * This function queries HW element information.
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
	buf->node_teid = CPU_TO_LE32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status != ICE_SUCCESS || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}
/**
 * ice_get_fw_mode - returns FW mode
 * @hw: pointer to the HW struct
 */
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
{
#define ICE_FW_MODE_DBG_M BIT(0)
#define ICE_FW_MODE_REC_M BIT(1)
#define ICE_FW_MODE_ROLLBACK_M BIT(2)
	u32 fw_mode;

	/* check the current FW mode */
	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;

	if (fw_mode & ICE_FW_MODE_DBG_M)
		return ICE_FW_MODE_DBG;
	else if (fw_mode & ICE_FW_MODE_REC_M)
		return ICE_FW_MODE_REC;
	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
		return ICE_FW_MODE_ROLLBACK;
	else
		return ICE_FW_MODE_NORMAL;
}
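/* Usage sketch (hypothetical caller): a driver init path might refuse to
 * continue, or limit itself to recovery actions, when the firmware is not
 * in its normal operating mode.
 */
static bool ice_example_fw_is_usable(struct ice_hw *hw)
{
	enum ice_fw_modes mode = ice_get_fw_mode(hw);

	if (mode != ICE_FW_MODE_NORMAL)
		ice_debug(hw, ICE_DBG_INIT, "FW mode %d is not normal\n",
			  mode);
	return mode == ICE_FW_MODE_NORMAL;
}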
/**
 * ice_fw_supports_link_override - check if FW supports link override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override.
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	/* Currently, only supported for E810 devices */
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}
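/* Worked example (hypothetical version numbers): if the link override
 * baseline were FW API 1.5.2, the checks above would accept 2.0.0 and 1.6.0
 * (newer major or minor), accept 1.5.2 and 1.5.3 via the patch comparison,
 * and reject 1.5.1 and 1.4.9.
 */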
/**
 * ice_get_link_default_override - get the link default override for a port
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port.
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}
/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = CPU_TO_LE16(buf_size);

	cmd->type = mib_type;
	cmd->length = CPU_TO_LE16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);