1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_common.h"
7 #include "ice_adminq_cmd.h"
10 #include "ice_switch.h"
12 #define ICE_PF_RESET_WAIT_COUNT 300
15 * ice_set_mac_type - Sets MAC type
16 * @hw: pointer to the HW structure
18 * This function sets the MAC type of the adapter based on the
19 * vendor ID and device ID stored in the HW structure.
21 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
23 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
25 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
26 return ICE_ERR_DEVICE_NOT_SUPPORTED;
28 switch (hw->device_id) {
29 case ICE_DEV_ID_E810C_BACKPLANE:
30 case ICE_DEV_ID_E810C_QSFP:
31 case ICE_DEV_ID_E810C_SFP:
32 case ICE_DEV_ID_E810_XXV_BACKPLANE:
33 case ICE_DEV_ID_E810_XXV_QSFP:
34 case ICE_DEV_ID_E810_XXV_SFP:
35 hw->mac_type = ICE_MAC_E810;
36 break;
37 case ICE_DEV_ID_E822C_10G_BASE_T:
38 case ICE_DEV_ID_E822C_BACKPLANE:
39 case ICE_DEV_ID_E822C_QSFP:
40 case ICE_DEV_ID_E822C_SFP:
41 case ICE_DEV_ID_E822C_SGMII:
42 case ICE_DEV_ID_E822L_10G_BASE_T:
43 case ICE_DEV_ID_E822L_BACKPLANE:
44 case ICE_DEV_ID_E822L_SFP:
45 case ICE_DEV_ID_E822L_SGMII:
46 case ICE_DEV_ID_E823L_10G_BASE_T:
47 case ICE_DEV_ID_E823L_1GBE:
48 case ICE_DEV_ID_E823L_BACKPLANE:
49 case ICE_DEV_ID_E823L_QSFP:
50 case ICE_DEV_ID_E823L_SFP:
51 case ICE_DEV_ID_E823C_10G_BASE_T:
52 case ICE_DEV_ID_E823C_BACKPLANE:
53 case ICE_DEV_ID_E823C_QSFP:
54 case ICE_DEV_ID_E823C_SFP:
55 case ICE_DEV_ID_E823C_SGMII:
56 hw->mac_type = ICE_MAC_GENERIC;
57 break;
58 default:
59 hw->mac_type = ICE_MAC_UNKNOWN;
60 break;
61 }
63 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
64 return ICE_SUCCESS;
65 }
69 * @hw: pointer to the hardware structure
71 * returns true if mac_type is ICE_MAC_GENERIC, false if not
73 bool ice_is_generic_mac(struct ice_hw *hw)
75 return hw->mac_type == ICE_MAC_GENERIC;
80 * @hw: pointer to the hardware structure
82 * returns true if the device is E810 based, false if not.
84 bool ice_is_e810(struct ice_hw *hw)
86 return hw->mac_type == ICE_MAC_E810;
90 * ice_clear_pf_cfg - Clear PF configuration
91 * @hw: pointer to the hardware structure
93 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
94 * configuration, flow director filters, etc.).
96 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
98 struct ice_aq_desc desc;
100 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
102 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
106 * ice_aq_manage_mac_read - manage MAC address read command
107 * @hw: pointer to the HW struct
108 * @buf: a virtual buffer to hold the manage MAC read response
109 * @buf_size: Size of the virtual buffer
110 * @cd: pointer to command details structure or NULL
112 * This function is used to return per PF station MAC address (0x0107).
113 * NOTE: Upon successful completion of this command, MAC address information
114 * is returned in user specified buffer. Please interpret user specified
115 * buffer as "manage_mac_read" response.
116 * Response such as various MAC addresses are stored in HW struct (port.mac)
117 * ice_discover_dev_caps is expected to be called before this function is
120 static enum ice_status
121 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
122 struct ice_sq_cd *cd)
124 struct ice_aqc_manage_mac_read_resp *resp;
125 struct ice_aqc_manage_mac_read *cmd;
126 struct ice_aq_desc desc;
127 enum ice_status status;
128 u16 flags;
129 u8 i;
131 cmd = &desc.params.mac_read;
133 if (buf_size < sizeof(*resp))
134 return ICE_ERR_BUF_TOO_SHORT;
136 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
138 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
140 if (status)
141 return status;
142 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
143 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
145 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
146 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
147 return ICE_ERR_CFG;
148 }
150 /* A single port can report up to two (LAN and WoL) addresses */
151 for (i = 0; i < cmd->num_addr; i++)
152 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
153 ice_memcpy(hw->port_info->mac.lan_addr,
154 resp[i].mac_addr, ETH_ALEN,
155 ICE_DMA_TO_NONDMA);
156 ice_memcpy(hw->port_info->mac.perm_addr,
157 resp[i].mac_addr,
158 ETH_ALEN, ICE_DMA_TO_NONDMA);
159 break;
160 }
162 return ICE_SUCCESS;
163 }
165 * ice_aq_get_phy_caps - returns PHY capabilities
166 * @pi: port information structure
167 * @qual_mods: report qualified modules
168 * @report_mode: report mode capabilities
169 * @pcaps: structure for PHY capabilities to be filled
170 * @cd: pointer to command details structure or NULL
172 * Returns the various PHY capabilities supported on the Port (0x0600)
175 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
176 struct ice_aqc_get_phy_caps_data *pcaps,
177 struct ice_sq_cd *cd)
179 struct ice_aqc_get_phy_caps *cmd;
180 u16 pcaps_size = sizeof(*pcaps);
181 struct ice_aq_desc desc;
182 enum ice_status status;
183 struct ice_hw *hw;
185 cmd = &desc.params.get_phy;
187 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
188 return ICE_ERR_PARAM;
189 hw = pi->hw;
191 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
192 !ice_fw_supports_report_dflt_cfg(hw))
193 return ICE_ERR_PARAM;
195 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
197 if (qual_mods)
198 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
200 cmd->param0 |= CPU_TO_LE16(report_mode);
201 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
203 ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
204 report_mode);
205 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
206 (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
207 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
208 (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
209 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
210 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
211 pcaps->low_power_ctrl_an);
212 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
213 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
214 pcaps->eeer_value);
215 ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
216 pcaps->link_fec_options);
217 ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
218 pcaps->module_compliance_enforcement);
219 ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
220 pcaps->extended_compliance_code);
221 ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
222 pcaps->module_type[0]);
223 ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
224 pcaps->module_type[1]);
225 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
226 pcaps->module_type[2]);
228 if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
229 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
230 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
231 ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
232 sizeof(pi->phy.link_info.module_type),
233 ICE_NONDMA_TO_NONDMA);
234 }
236 return status;
237 }
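/* Usage sketch (illustrative only): querying media-capable PHY capabilities
 * the same way ice_init_hw() does further below. The pcaps buffer must be at
 * least sizeof(struct ice_aqc_get_phy_caps_data).
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	enum ice_status status;
 *
 *	pcaps = (struct ice_aqc_get_phy_caps_data *)
 *		ice_malloc(hw, sizeof(*pcaps));
 *	if (!pcaps)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_get_phy_caps(hw->port_info, false,
 *				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
 *	ice_free(hw, pcaps);
 */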
240 * ice_aq_get_link_topo_handle - get link topology node return status
241 * @pi: port information structure
242 * @node_type: requested node type
243 * @cd: pointer to command details structure or NULL
245 * Get link topology node return status for specified node type (0x06E0)
247 * Node type cage can be used to determine if cage is present. If AQC
248 * returns error (ENOENT), then no cage present. If no cage present, then
249 * connection type is backplane or BASE-T.
251 static enum ice_status
252 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
253 struct ice_sq_cd *cd)
255 struct ice_aqc_get_link_topo *cmd;
256 struct ice_aq_desc desc;
258 cmd = &desc.params.get_link_topo;
260 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
262 cmd->addr.topo_params.node_type_ctx =
263 (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
264 ICE_AQC_LINK_TOPO_NODE_CTX_S);
267 cmd->addr.topo_params.node_type_ctx |=
268 (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
270 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
274 * ice_is_media_cage_present
275 * @pi: port information structure
277 * Returns true if media cage is present, else false. If no cage, then
278 * media type is backplane or BASE-T.
280 static bool ice_is_media_cage_present(struct ice_port_info *pi)
282 /* Node type cage can be used to determine if cage is present. If AQC
283 * returns error (ENOENT), then no cage present. If no cage present then
284 * connection type is backplane or BASE-T.
286 return !ice_aq_get_link_topo_handle(pi,
287 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
288 NULL);
289 }
292 * ice_get_media_type - Gets media type
293 * @pi: port information structure
295 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
297 struct ice_link_status *hw_link_info;
299 if (!pi)
300 return ICE_MEDIA_UNKNOWN;
302 hw_link_info = &pi->phy.link_info;
303 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
304 /* If more than one media type is selected, report unknown */
305 return ICE_MEDIA_UNKNOWN;
307 if (hw_link_info->phy_type_low) {
308 /* 1G SGMII is a special case where some DA cable PHYs
309 * may show this as an option when it really shouldn't
310 * be since SGMII is meant to be between a MAC and a PHY
311 * in a backplane. Try to detect this case and handle it
313 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
314 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
315 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
316 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
317 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
318 return ICE_MEDIA_DA;
320 switch (hw_link_info->phy_type_low) {
321 case ICE_PHY_TYPE_LOW_1000BASE_SX:
322 case ICE_PHY_TYPE_LOW_1000BASE_LX:
323 case ICE_PHY_TYPE_LOW_10GBASE_SR:
324 case ICE_PHY_TYPE_LOW_10GBASE_LR:
325 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
326 case ICE_PHY_TYPE_LOW_25GBASE_SR:
327 case ICE_PHY_TYPE_LOW_25GBASE_LR:
328 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
329 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
330 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
331 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
332 case ICE_PHY_TYPE_LOW_50GBASE_SR:
333 case ICE_PHY_TYPE_LOW_50GBASE_FR:
334 case ICE_PHY_TYPE_LOW_50GBASE_LR:
335 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
336 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
337 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
338 case ICE_PHY_TYPE_LOW_100GBASE_DR:
339 return ICE_MEDIA_FIBER;
340 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
341 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
342 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
343 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
344 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
345 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
346 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
347 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
348 return ICE_MEDIA_FIBER;
349 case ICE_PHY_TYPE_LOW_100BASE_TX:
350 case ICE_PHY_TYPE_LOW_1000BASE_T:
351 case ICE_PHY_TYPE_LOW_2500BASE_T:
352 case ICE_PHY_TYPE_LOW_5GBASE_T:
353 case ICE_PHY_TYPE_LOW_10GBASE_T:
354 case ICE_PHY_TYPE_LOW_25GBASE_T:
355 return ICE_MEDIA_BASET;
356 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
357 case ICE_PHY_TYPE_LOW_25GBASE_CR:
358 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
359 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
360 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
361 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
362 case ICE_PHY_TYPE_LOW_50GBASE_CP:
363 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
364 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
365 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
366 return ICE_MEDIA_DA;
367 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
368 case ICE_PHY_TYPE_LOW_40G_XLAUI:
369 case ICE_PHY_TYPE_LOW_50G_LAUI2:
370 case ICE_PHY_TYPE_LOW_50G_AUI2:
371 case ICE_PHY_TYPE_LOW_50G_AUI1:
372 case ICE_PHY_TYPE_LOW_100G_AUI4:
373 case ICE_PHY_TYPE_LOW_100G_CAUI4:
374 if (ice_is_media_cage_present(pi))
375 return ICE_MEDIA_AUI;
377 case ICE_PHY_TYPE_LOW_1000BASE_KX:
378 case ICE_PHY_TYPE_LOW_2500BASE_KX:
379 case ICE_PHY_TYPE_LOW_2500BASE_X:
380 case ICE_PHY_TYPE_LOW_5GBASE_KR:
381 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
382 case ICE_PHY_TYPE_LOW_25GBASE_KR:
383 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
384 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
385 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
386 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
387 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
388 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
389 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
390 return ICE_MEDIA_BACKPLANE;
393 switch (hw_link_info->phy_type_high) {
394 case ICE_PHY_TYPE_HIGH_100G_AUI2:
395 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
396 if (ice_is_media_cage_present(pi))
397 return ICE_MEDIA_AUI;
399 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
400 return ICE_MEDIA_BACKPLANE;
401 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
402 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
403 return ICE_MEDIA_FIBER;
406 return ICE_MEDIA_UNKNOWN;
410 * ice_aq_get_link_info
411 * @pi: port information structure
412 * @ena_lse: enable/disable LinkStatusEvent reporting
413 * @link: pointer to link status structure - optional
414 * @cd: pointer to command details structure or NULL
416 * Get Link Status (0x607). Returns the link status of the adapter.
419 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
420 struct ice_link_status *link, struct ice_sq_cd *cd)
422 struct ice_aqc_get_link_status_data link_data = { 0 };
423 struct ice_aqc_get_link_status *resp;
424 struct ice_link_status *li_old, *li;
425 enum ice_media_type *hw_media_type;
426 struct ice_fc_info *hw_fc_info;
427 bool tx_pause, rx_pause;
428 struct ice_aq_desc desc;
429 enum ice_status status;
430 u16 cmd_flags;
431 struct ice_hw *hw;
433 if (!pi)
434 return ICE_ERR_PARAM;
435 hw = pi->hw;
436 li_old = &pi->phy.link_info_old;
437 hw_media_type = &pi->phy.media_type;
438 li = &pi->phy.link_info;
439 hw_fc_info = &pi->fc;
441 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
442 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
443 resp = &desc.params.get_link_status;
444 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
445 resp->lport_num = pi->lport;
447 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
449 if (status != ICE_SUCCESS)
450 return status;
452 /* save off old link status information */
453 *li_old = *li;
455 /* update current link status information */
456 li->link_speed = LE16_TO_CPU(link_data.link_speed);
457 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
458 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
459 *hw_media_type = ice_get_media_type(pi);
460 li->link_info = link_data.link_info;
461 li->link_cfg_err = link_data.link_cfg_err;
462 li->an_info = link_data.an_info;
463 li->ext_info = link_data.ext_info;
464 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
465 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
466 li->topo_media_conflict = link_data.topo_media_conflict;
467 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
468 ICE_AQ_CFG_PACING_TYPE_M);
471 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
472 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
473 if (tx_pause && rx_pause)
474 hw_fc_info->current_mode = ICE_FC_FULL;
475 else if (tx_pause)
476 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
477 else if (rx_pause)
478 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
479 else
480 hw_fc_info->current_mode = ICE_FC_NONE;
482 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
484 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
485 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
486 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
487 (unsigned long long)li->phy_type_low);
488 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
489 (unsigned long long)li->phy_type_high);
490 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
491 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
492 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
493 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
494 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
495 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
496 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
497 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
499 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
501 /* save link status information */
502 if (link)
503 *link = *li;
505 /* flag cleared so calling functions don't call AQ again */
506 pi->phy.get_link_info = false;
508 return ICE_SUCCESS;
509 }
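/* Usage sketch (illustrative only): refreshing the cached link status
 * without enabling Link Status Events. Passing NULL for the optional link
 * pointer only updates pi->phy.link_info.
 *
 *	enum ice_status status;
 *	bool link_up;
 *
 *	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
 *	if (status)
 *		return status;
 *	link_up = !!(hw->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP);
 */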
512 * ice_fill_tx_timer_and_fc_thresh
513 * @hw: pointer to the HW struct
514 * @cmd: pointer to MAC cfg structure
516 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
520 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
521 struct ice_aqc_set_mac_cfg *cmd)
523 u16 fc_thres_val, tx_timer_val;
524 u32 val;
526 /* We read back the transmit timer and fc threshold value of
527 * LFC. Thus, we will use index =
528 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
530 * Also, because we are operating on transmit timer and fc
531 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
532 */
533 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
535 /* Retrieve the transmit timer */
536 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
537 tx_timer_val = val &
538 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
539 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
541 /* Retrieve the fc threshold */
542 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
543 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
545 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
550 * @hw: pointer to the HW struct
551 * @max_frame_size: Maximum Frame Size to be supported
552 * @cd: pointer to command details structure or NULL
554 * Set MAC configuration (0x0603)
557 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
559 struct ice_aqc_set_mac_cfg *cmd;
560 struct ice_aq_desc desc;
562 cmd = &desc.params.set_mac_cfg;
564 if (max_frame_size == 0)
565 return ICE_ERR_PARAM;
567 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
569 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
571 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
573 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
577 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
578 * @hw: pointer to the HW struct
580 enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
582 struct ice_switch_info *sw;
583 enum ice_status status;
585 hw->switch_info = (struct ice_switch_info *)
586 ice_malloc(hw, sizeof(*hw->switch_info));
588 sw = hw->switch_info;
590 if (!sw)
591 return ICE_ERR_NO_MEMORY;
593 INIT_LIST_HEAD(&sw->vsi_list_map_head);
594 sw->prof_res_bm_init = 0;
596 status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
597 if (status) {
598 ice_free(hw, hw->switch_info);
599 return status;
600 }
601 return ICE_SUCCESS;
602 }
605 * ice_cleanup_fltr_mgmt_single - clears single filter mngt struct
606 * @hw: pointer to the HW struct
607 * @sw: pointer to switch info struct for which function clears filters
610 ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
612 struct ice_vsi_list_map_info *v_pos_map;
613 struct ice_vsi_list_map_info *v_tmp_map;
614 struct ice_sw_recipe *recps;
615 u8 i;
617 if (!sw)
618 return;
620 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
621 ice_vsi_list_map_info, list_entry) {
622 LIST_DEL(&v_pos_map->list_entry);
623 ice_free(hw, v_pos_map);
625 recps = sw->recp_list;
626 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
627 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
629 recps[i].root_rid = i;
630 LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
631 &recps[i].rg_list, ice_recp_grp_entry,
633 LIST_DEL(&rg_entry->l_entry);
634 ice_free(hw, rg_entry);
637 if (recps[i].adv_rule) {
638 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
639 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
641 ice_destroy_lock(&recps[i].filt_rule_lock);
642 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
643 &recps[i].filt_rules,
644 ice_adv_fltr_mgmt_list_entry,
646 LIST_DEL(&lst_itr->list_entry);
647 ice_free(hw, lst_itr->lkups);
648 ice_free(hw, lst_itr);
649 }
650 } else {
651 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
653 ice_destroy_lock(&recps[i].filt_rule_lock);
654 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
655 &recps[i].filt_rules,
656 ice_fltr_mgmt_list_entry,
658 LIST_DEL(&lst_itr->list_entry);
659 ice_free(hw, lst_itr);
662 if (recps[i].root_buf)
663 ice_free(hw, recps[i].root_buf);
665 ice_rm_sw_replay_rule_info(hw, sw);
666 ice_free(hw, sw->recp_list);
667 ice_free(hw, sw);
668 }
671 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
672 * @hw: pointer to the HW struct
674 void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
676 ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
680 * ice_get_itr_intrl_gran
681 * @hw: pointer to the HW struct
683 * Determines the ITR/INTRL granularities based on the maximum aggregate
684 * bandwidth according to the device's configuration during power-on.
686 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
688 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
689 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
690 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
692 switch (max_agg_bw) {
693 case ICE_MAX_AGG_BW_200G:
694 case ICE_MAX_AGG_BW_100G:
695 case ICE_MAX_AGG_BW_50G:
696 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
697 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
698 break;
699 case ICE_MAX_AGG_BW_25G:
700 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
701 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
707 * ice_print_rollback_msg - print FW rollback message
708 * @hw: pointer to the hardware structure
710 void ice_print_rollback_msg(struct ice_hw *hw)
712 char nvm_str[ICE_NVM_VER_LEN] = { 0 };
713 struct ice_orom_info *orom;
714 struct ice_nvm_info *nvm;
716 orom = &hw->flash.orom;
717 nvm = &hw->flash.nvm;
719 SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
720 nvm->major, nvm->minor, nvm->eetrack, orom->major,
721 orom->build, orom->patch);
722 ice_warn(hw,
723 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
724 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
728 * ice_init_hw - main hardware initialization routine
729 * @hw: pointer to the hardware structure
731 enum ice_status ice_init_hw(struct ice_hw *hw)
733 struct ice_aqc_get_phy_caps_data *pcaps;
734 enum ice_status status;
735 u16 mac_buf_len;
736 void *mac_buf;
738 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
740 /* Set MAC type based on DeviceID */
741 status = ice_set_mac_type(hw);
742 if (status)
743 return status;
745 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
746 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
747 PF_FUNC_RID_FUNCTION_NUMBER_S;
749 status = ice_reset(hw, ICE_RESET_PFR);
750 if (status)
751 return status;
753 ice_get_itr_intrl_gran(hw);
755 status = ice_create_all_ctrlq(hw);
756 if (status)
757 goto err_unroll_cqinit;
759 status = ice_init_nvm(hw);
760 if (status)
761 goto err_unroll_cqinit;
763 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
764 ice_print_rollback_msg(hw);
766 status = ice_clear_pf_cfg(hw);
767 if (status)
768 goto err_unroll_cqinit;
770 /* Set bit to enable Flow Director filters */
771 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
772 INIT_LIST_HEAD(&hw->fdir_list_head);
774 ice_clear_pxe_mode(hw);
776 status = ice_get_caps(hw);
777 if (status)
778 goto err_unroll_cqinit;
780 hw->port_info = (struct ice_port_info *)
781 ice_malloc(hw, sizeof(*hw->port_info));
782 if (!hw->port_info) {
783 status = ICE_ERR_NO_MEMORY;
784 goto err_unroll_cqinit;
787 /* set the back pointer to HW */
788 hw->port_info->hw = hw;
790 /* Initialize port_info struct with switch configuration data */
791 status = ice_get_initial_sw_cfg(hw);
792 if (status)
793 goto err_unroll_alloc;
796 /* Query the allocated resources for Tx scheduler */
797 status = ice_sched_query_res_alloc(hw);
798 if (status) {
799 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
800 goto err_unroll_alloc;
801 }
802 ice_sched_get_psm_clk_freq(hw);
804 /* Initialize port_info struct with scheduler data */
805 status = ice_sched_init_port(hw->port_info);
806 if (status)
807 goto err_unroll_sched;
808 pcaps = (struct ice_aqc_get_phy_caps_data *)
809 ice_malloc(hw, sizeof(*pcaps));
810 if (!pcaps) {
811 status = ICE_ERR_NO_MEMORY;
812 goto err_unroll_sched;
813 }
815 /* Initialize port_info struct with PHY capabilities */
816 status = ice_aq_get_phy_caps(hw->port_info, false,
817 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
818 ice_free(hw, pcaps);
819 if (status)
820 ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
821 status);
823 /* Initialize port_info struct with link information */
824 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
825 if (status)
826 goto err_unroll_sched;
827 /* need a valid SW entry point to build a Tx tree */
828 if (!hw->sw_entry_point_layer) {
829 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
830 status = ICE_ERR_CFG;
831 goto err_unroll_sched;
833 INIT_LIST_HEAD(&hw->agg_list);
834 /* Initialize max burst size */
835 if (!hw->max_burst_size)
836 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
837 status = ice_init_fltr_mgmt_struct(hw);
838 if (status)
839 goto err_unroll_sched;
841 /* Get MAC information */
842 /* A single port can report up to two (LAN and WoL) addresses */
843 mac_buf = ice_calloc(hw, 2,
844 sizeof(struct ice_aqc_manage_mac_read_resp));
845 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
847 if (!mac_buf) {
848 status = ICE_ERR_NO_MEMORY;
849 goto err_unroll_fltr_mgmt_struct;
850 }
852 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
853 ice_free(hw, mac_buf);
855 if (status)
856 goto err_unroll_fltr_mgmt_struct;
857 /* Obtain counter base index which would be used by flow director */
858 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
859 if (status)
860 goto err_unroll_fltr_mgmt_struct;
861 status = ice_init_hw_tbls(hw);
862 if (status)
863 goto err_unroll_fltr_mgmt_struct;
864 ice_init_lock(&hw->tnl_lock);
866 return ICE_SUCCESS;
868 err_unroll_fltr_mgmt_struct:
869 ice_cleanup_fltr_mgmt_struct(hw);
870 err_unroll_sched:
871 ice_sched_cleanup_all(hw);
872 err_unroll_alloc:
873 ice_free(hw, hw->port_info);
874 hw->port_info = NULL;
875 err_unroll_cqinit:
876 ice_destroy_all_ctrlq(hw);
877 return status;
878 }
881 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
882 * @hw: pointer to the hardware structure
884 * This should be called only during nominal operation, not as a result of
885 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
886 * applicable initializations if it fails for any reason.
888 void ice_deinit_hw(struct ice_hw *hw)
890 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
891 ice_cleanup_fltr_mgmt_struct(hw);
893 ice_sched_cleanup_all(hw);
894 ice_sched_clear_agg(hw);
896 ice_free_hw_tbls(hw);
897 ice_destroy_lock(&hw->tnl_lock);
900 ice_free(hw, hw->port_info);
901 hw->port_info = NULL;
904 ice_destroy_all_ctrlq(hw);
906 /* Clear VSI contexts if not already cleared */
907 ice_clear_all_vsi_ctx(hw);
911 * ice_check_reset - Check to see if a global reset is complete
912 * @hw: pointer to the hardware structure
914 enum ice_status ice_check_reset(struct ice_hw *hw)
916 u32 cnt, reg = 0, grst_timeout, uld_mask;
918 /* Poll for Device Active state in case a recent CORER, GLOBR,
919 * or EMPR has occurred. The grst delay value is in 100ms units.
920 * Add 1sec for outstanding AQ commands that can take a long time.
922 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
923 GLGEN_RSTCTL_GRSTDEL_S) + 10;
925 for (cnt = 0; cnt < grst_timeout; cnt++) {
926 ice_msec_delay(100, true);
927 reg = rd32(hw, GLGEN_RSTAT);
928 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
929 break;
930 }
932 if (cnt == grst_timeout) {
933 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
934 return ICE_ERR_RESET_FAILED;
937 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
938 GLNVM_ULD_PCIER_DONE_1_M |\
939 GLNVM_ULD_CORER_DONE_M |\
940 GLNVM_ULD_GLOBR_DONE_M |\
941 GLNVM_ULD_POR_DONE_M |\
942 GLNVM_ULD_POR_DONE_1_M |\
943 GLNVM_ULD_PCIER_DONE_2_M)
945 uld_mask = ICE_RESET_DONE_MASK;
947 /* Device is Active; check Global Reset processes are done */
948 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
949 reg = rd32(hw, GLNVM_ULD) & uld_mask;
950 if (reg == uld_mask) {
951 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
952 break;
953 }
954 ice_msec_delay(10, true);
957 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
958 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
959 reg);
960 return ICE_ERR_RESET_FAILED;
961 }
963 return ICE_SUCCESS;
964 }
967 * ice_pf_reset - Reset the PF
968 * @hw: pointer to the hardware structure
970 * If a global reset has been triggered, this function checks
971 * for its completion and then issues the PF reset
973 static enum ice_status ice_pf_reset(struct ice_hw *hw)
974 {
975 u32 cnt, reg;
977 /* If at function entry a global reset was already in progress, i.e.
978 * state is not 'device active' or any of the reset done bits are not
979 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
980 * global reset is done.
982 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
983 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
984 /* poll on global reset currently in progress until done */
985 if (ice_check_reset(hw))
986 return ICE_ERR_RESET_FAILED;
988 return ICE_SUCCESS;
989 }
991 /* Reset the PF */
992 reg = rd32(hw, PFGEN_CTRL);
994 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
996 /* Wait for the PFR to complete. The wait time is the global config lock
997 * timeout plus the PFR timeout which will account for a possible reset
998 * that is occurring during a download package operation.
1000 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1001 ICE_PF_RESET_WAIT_COUNT; cnt++) {
1002 reg = rd32(hw, PFGEN_CTRL);
1003 if (!(reg & PFGEN_CTRL_PFSWR_M))
1004 break;
1006 ice_msec_delay(1, true);
1009 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1010 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1011 return ICE_ERR_RESET_FAILED;
1012 }
1014 return ICE_SUCCESS;
1015 }
1018 * ice_reset - Perform different types of reset
1019 * @hw: pointer to the hardware structure
1020 * @req: reset request
1022 * This function triggers a reset as specified by the req parameter.
1025 * If anything other than a PF reset is triggered, PXE mode is restored.
1026 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1027 * interface has been restored in the rebuild flow.
1029 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1030 {
1031 u32 val = 0;
1033 switch (req) {
1034 case ICE_RESET_PFR:
1035 return ice_pf_reset(hw);
1036 case ICE_RESET_CORER:
1037 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1038 val = GLGEN_RTRIG_CORER_M;
1039 break;
1040 case ICE_RESET_GLOBR:
1041 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1042 val = GLGEN_RTRIG_GLOBR_M;
1043 break;
1044 default:
1045 return ICE_ERR_PARAM;
1046 }
1048 val |= rd32(hw, GLGEN_RTRIG);
1049 wr32(hw, GLGEN_RTRIG, val);
1050 ice_flush(hw);
1052 /* wait for the FW to be ready */
1053 return ice_check_reset(hw);
1057 * ice_copy_rxq_ctx_to_hw
1058 * @hw: pointer to the hardware structure
1059 * @ice_rxq_ctx: pointer to the rxq context
1060 * @rxq_index: the index of the Rx queue
1062 * Copies rxq context from dense structure to HW register space
1064 static enum ice_status
1065 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1066 {
1067 u8 i;
1069 if (!ice_rxq_ctx)
1070 return ICE_ERR_BAD_PTR;
1072 if (rxq_index > QRX_CTRL_MAX_INDEX)
1073 return ICE_ERR_PARAM;
1075 /* Copy each dword separately to HW */
1076 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1077 wr32(hw, QRX_CONTEXT(i, rxq_index),
1078 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1080 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1081 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1082 }
1084 return ICE_SUCCESS;
1085 }
1087 /* LAN Rx Queue Context */
1088 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1089 /* Field Width LSB */
1090 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1091 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1092 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1093 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1094 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1095 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1096 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1097 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1098 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1099 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1100 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1101 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1102 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1103 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1104 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1105 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1106 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1107 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1108 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1109 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1110 { 0 }
1111 };
1115 * @hw: pointer to the hardware structure
1116 * @rlan_ctx: pointer to the rxq context
1117 * @rxq_index: the index of the Rx queue
1119 * Converts rxq context from sparse to dense structure and then writes
1120 * it to HW register space and enables the hardware to prefetch descriptors
1121 * instead of only fetching them on demand
1124 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1125 u32 rxq_index)
1126 {
1127 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1129 if (!rlan_ctx)
1130 return ICE_ERR_BAD_PTR;
1132 rlan_ctx->prefena = 1;
1134 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1135 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1136 }
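/* Usage sketch (illustrative only): programming one Rx queue context before
 * enabling the queue. The field values below are placeholders; real callers
 * derive them from the ring configuration, and base address/buffer sizes are
 * typically programmed in 128-byte units.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;		// placeholder ring DMA address
 *	rlan_ctx.qlen = nb_desc;		// placeholder descriptor count
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	// placeholder data buffer size
 *	if (ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index))
 *		return ICE_ERR_CFG;
 */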
1140 * @hw: pointer to the hardware structure
1141 * @rxq_index: the index of the Rx queue to clear
1143 * Clears rxq context in HW register space
1145 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1146 {
1147 u8 i;
1149 if (rxq_index > QRX_CTRL_MAX_INDEX)
1150 return ICE_ERR_PARAM;
1152 /* Clear each dword register separately */
1153 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1154 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1156 return ICE_SUCCESS;
1157 }
1159 /* LAN Tx Queue Context */
1160 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1161 /* Field Width LSB */
1162 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1163 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1164 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1165 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1166 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1167 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1168 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1169 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1170 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1171 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1172 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1173 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1174 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1175 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1176 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1177 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1178 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1179 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1180 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1181 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1182 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1183 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1184 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1185 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1186 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1187 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1188 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1189 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1190 { 0 }
1191 };
1194 * ice_copy_tx_cmpltnq_ctx_to_hw
1195 * @hw: pointer to the hardware structure
1196 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1197 * @tx_cmpltnq_index: the index of the completion queue
1199 * Copies Tx completion queue context from dense structure to HW register space
1201 static enum ice_status
1202 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1203 u32 tx_cmpltnq_index)
1204 {
1205 u8 i;
1207 if (!ice_tx_cmpltnq_ctx)
1208 return ICE_ERR_BAD_PTR;
1210 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1211 return ICE_ERR_PARAM;
1213 /* Copy each dword separately to HW */
1214 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1215 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1216 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1218 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1219 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1220 }
1222 return ICE_SUCCESS;
1223 }
1225 /* LAN Tx Completion Queue Context */
1226 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1227 /* Field Width LSB */
1228 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1229 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1230 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1231 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1232 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1233 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1234 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1235 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1236 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1237 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1238 { 0 }
1239 };
1242 * ice_write_tx_cmpltnq_ctx
1243 * @hw: pointer to the hardware structure
1244 * @tx_cmpltnq_ctx: pointer to the completion queue context
1245 * @tx_cmpltnq_index: the index of the completion queue
1247 * Converts completion queue context from sparse to dense structure and then
1248 * writes it to HW register space
1251 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1252 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1253 u32 tx_cmpltnq_index)
1255 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1257 ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1258 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1262 * ice_clear_tx_cmpltnq_ctx
1263 * @hw: pointer to the hardware structure
1264 * @tx_cmpltnq_index: the index of the completion queue to clear
1266 * Clears Tx completion queue context in HW register space
1269 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1270 {
1271 u8 i;
1273 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1274 return ICE_ERR_PARAM;
1276 /* Clear each dword register separately */
1277 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1278 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1280 return ICE_SUCCESS;
1281 }
1284 * ice_copy_tx_drbell_q_ctx_to_hw
1285 * @hw: pointer to the hardware structure
1286 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1287 * @tx_drbell_q_index: the index of the doorbell queue
1289 * Copies doorbell queue context from dense structure to HW register space
1291 static enum ice_status
1292 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1293 u32 tx_drbell_q_index)
1294 {
1295 u8 i;
1297 if (!ice_tx_drbell_q_ctx)
1298 return ICE_ERR_BAD_PTR;
1300 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1301 return ICE_ERR_PARAM;
1303 /* Copy each dword separately to HW */
1304 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1305 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1306 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1308 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1309 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1310 }
1312 return ICE_SUCCESS;
1313 }
1315 /* LAN Tx Doorbell Queue Context info */
1316 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1317 /* Field Width LSB */
1318 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1319 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1320 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1321 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1322 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1323 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1324 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1325 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1326 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1327 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1328 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1329 { 0 }
1330 };
1333 * ice_write_tx_drbell_q_ctx
1334 * @hw: pointer to the hardware structure
1335 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1336 * @tx_drbell_q_index: the index of the doorbell queue
1338 * Converts doorbell queue context from sparse to dense structure and then
1339 * writes it to HW register space
1342 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1343 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1344 u32 tx_drbell_q_index)
1346 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1348 ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1349 ice_tx_drbell_q_ctx_info);
1350 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1354 * ice_clear_tx_drbell_q_ctx
1355 * @hw: pointer to the hardware structure
1356 * @tx_drbell_q_index: the index of the doorbell queue to clear
1358 * Clears doorbell queue context in HW register space
1361 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1362 {
1363 u8 i;
1365 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1366 return ICE_ERR_PARAM;
1368 /* Clear each dword register separately */
1369 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1370 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1372 return ICE_SUCCESS;
1373 }
1375 /* Sideband Queue command wrappers */
1378 * ice_get_sbq - returns the right control queue to use for sideband
1379 * @hw: pointer to the hardware structure
1381 static struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
1382 {
1383 if (!ice_is_generic_mac(hw))
1384 return &hw->adminq;
1385 return &hw->sbq;
1386 }
1389 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
1390 * @hw: pointer to the HW struct
1391 * @desc: descriptor describing the command
1392 * @buf: buffer to use for indirect commands (NULL for direct commands)
1393 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1394 * @cd: pointer to command details structure
1396 static enum ice_status
1397 ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1398 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1399 {
1400 return ice_sq_send_cmd(hw, ice_get_sbq(hw), (struct ice_aq_desc *)desc,
1401 buf, buf_size, cd);
1402 }
1405 * ice_sbq_send_cmd_nolock - send Sideband Queue command to Sideband Queue
1406 * but do not lock sq_lock
1407 * @hw: pointer to the HW struct
1408 * @desc: descriptor describing the command
1409 * @buf: buffer to use for indirect commands (NULL for direct commands)
1410 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1411 * @cd: pointer to command details structure
1413 static enum ice_status
1414 ice_sbq_send_cmd_nolock(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1415 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1417 return ice_sq_send_cmd_nolock(hw, ice_get_sbq(hw),
1418 (struct ice_aq_desc *)desc, buf,
1419 buf_size, cd);
1420 }
1423 * ice_sbq_rw_reg_lp - Fill Sideband Queue command, with lock parameter
1424 * @hw: pointer to the HW struct
1425 * @in: message info to be filled in descriptor
1426 * @lock: true to lock the sq_lock (the usual case); false if the sq_lock has
1427 * already been locked at a higher level
1429 enum ice_status ice_sbq_rw_reg_lp(struct ice_hw *hw,
1430 struct ice_sbq_msg_input *in, bool lock)
1432 struct ice_sbq_cmd_desc desc = {0};
1433 struct ice_sbq_msg_req msg = {0};
1434 enum ice_status status;
1435 u16 msg_len;
1437 msg_len = sizeof(msg);
1439 msg.dest_dev = in->dest_dev;
1440 msg.opcode = in->opcode;
1441 msg.flags = ICE_SBQ_MSG_FLAGS;
1442 msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1443 msg.msg_addr_low = CPU_TO_LE16(in->msg_addr_low);
1444 msg.msg_addr_high = CPU_TO_LE32(in->msg_addr_high);
1446 if (in->opcode)
1447 msg.data = CPU_TO_LE32(in->data);
1448 else
1449 /* data read comes back in completion, so shorten the struct by
1450 * sizeof(msg.data)
1451 */
1452 msg_len -= sizeof(msg.data);
1454 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
1455 desc.opcode = CPU_TO_LE16(ice_sbq_opc_neigh_dev_req);
1456 desc.param0.cmd_len = CPU_TO_LE16(msg_len);
1457 if (lock)
1458 status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1459 else
1460 status = ice_sbq_send_cmd_nolock(hw, &desc, &msg, msg_len,
1461 NULL);
1462 if (!status && !in->opcode)
1463 in->data = LE32_TO_CPU
1464 (((struct ice_sbq_msg_cmpl *)&msg)->data);
1466 return status;
1467 }
1469 * ice_sbq_rw_reg - Fill Sideband Queue command
1470 * @hw: pointer to the HW struct
1471 * @in: message info to be filled in descriptor
1473 enum ice_status ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
1475 return ice_sbq_rw_reg_lp(hw, in, true);
1476 }
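/* Usage sketch (illustrative only): a sideband register read through
 * ice_sbq_rw_reg(). Per the wrapper above, a zero opcode is treated as a
 * read and the data comes back in the completion. Device and address values
 * are placeholders.
 *
 *	struct ice_sbq_msg_input msg = { 0 };
 *	enum ice_status status;
 *	u32 val;
 *
 *	msg.dest_dev = dest_dev;	// placeholder destination device
 *	msg.opcode = 0;			// read
 *	msg.msg_addr_low = addr_low;	// placeholder register address
 *	msg.msg_addr_high = addr_high;
 *	status = ice_sbq_rw_reg(hw, &msg);
 *	if (!status)
 *		val = msg.data;
 */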
1479 * ice_sbq_lock - Lock the sideband queue's sq_lock
1480 * @hw: pointer to the HW struct
1482 void ice_sbq_lock(struct ice_hw *hw)
1484 ice_acquire_lock(&ice_get_sbq(hw)->sq_lock);
1488 * ice_sbq_unlock - Unlock the sideband queue's sq_lock
1489 * @hw: pointer to the HW struct
1491 void ice_sbq_unlock(struct ice_hw *hw)
1493 ice_release_lock(&ice_get_sbq(hw)->sq_lock);
1496 /* FW Admin Queue command wrappers */
1499 * ice_should_retry_sq_send_cmd
1500 * @opcode: AQ opcode
1502 * Decide if we should retry the send command routine for the ATQ, depending
1505 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1506 {
1507 switch (opcode) {
1508 case ice_aqc_opc_get_link_topo:
1509 case ice_aqc_opc_lldp_stop:
1510 case ice_aqc_opc_lldp_start:
1511 case ice_aqc_opc_lldp_filter_ctrl:
1512 return true;
1513 }
1515 return false;
1516 }
1519 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1520 * @hw: pointer to the HW struct
1521 * @cq: pointer to the specific Control queue
1522 * @desc: prefilled descriptor describing the command
1523 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1524 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1525 * @cd: pointer to command details structure
1527 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1528 * Queue if the EBUSY AQ error is returned.
1530 static enum ice_status
1531 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1532 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1533 struct ice_sq_cd *cd)
1535 struct ice_aq_desc desc_cpy;
1536 enum ice_status status;
1537 bool is_cmd_for_retry;
1538 u8 *buf_cpy = NULL;
1539 u8 idx = 0;
1540 u16 opcode;
1542 opcode = LE16_TO_CPU(desc->opcode);
1543 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1544 ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
1546 if (is_cmd_for_retry) {
1547 if (buf) {
1548 buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1549 if (!buf_cpy)
1550 return ICE_ERR_NO_MEMORY;
1551 }
1553 ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1554 ICE_NONDMA_TO_NONDMA);
1555 }
1557 do {
1558 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1560 if (!is_cmd_for_retry || status == ICE_SUCCESS ||
1561 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1562 break;
1564 if (buf_cpy)
1565 ice_memcpy(buf, buf_cpy, buf_size,
1566 ICE_NONDMA_TO_NONDMA);
1568 ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1569 ICE_NONDMA_TO_NONDMA);
1571 ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1573 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1575 if (buf_cpy)
1576 ice_free(hw, buf_cpy);
1578 return status;
1579 }
1582 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1583 * @hw: pointer to the HW struct
1584 * @desc: descriptor describing the command
1585 * @buf: buffer to use for indirect commands (NULL for direct commands)
1586 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1587 * @cd: pointer to command details structure
1589 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1592 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1593 u16 buf_size, struct ice_sq_cd *cd)
1595 if (hw->aq_send_cmd_fn) {
1596 enum ice_status status = ICE_ERR_NOT_READY;
1597 u16 retval = ICE_AQ_RC_OK;
1599 ice_acquire_lock(&hw->adminq.sq_lock);
1600 if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc,
1601 buf, buf_size)) {
1602 retval = LE16_TO_CPU(desc->retval);
1603 /* strip off FW internal code */
1604 if (retval)
1605 retval &= 0xff;
1606 if (retval == ICE_AQ_RC_OK)
1607 status = ICE_SUCCESS;
1608 else
1609 status = ICE_ERR_AQ_ERROR;
1610 }
1612 hw->adminq.sq_last_status = (enum ice_aq_err)retval;
1613 ice_release_lock(&hw->adminq.sq_lock);
1615 return status;
1616 }
1617 return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1622 * @hw: pointer to the HW struct
1623 * @cd: pointer to command details structure or NULL
1625 * Get the firmware version (0x0001) from the admin queue commands
1627 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1629 struct ice_aqc_get_ver *resp;
1630 struct ice_aq_desc desc;
1631 enum ice_status status;
1633 resp = &desc.params.get_ver;
1635 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1637 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1639 if (!status) {
1640 hw->fw_branch = resp->fw_branch;
1641 hw->fw_maj_ver = resp->fw_major;
1642 hw->fw_min_ver = resp->fw_minor;
1643 hw->fw_patch = resp->fw_patch;
1644 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1645 hw->api_branch = resp->api_branch;
1646 hw->api_maj_ver = resp->api_major;
1647 hw->api_min_ver = resp->api_minor;
1648 hw->api_patch = resp->api_patch;
1649 }
1651 return status;
1652 }
1655 * ice_aq_send_driver_ver
1656 * @hw: pointer to the HW struct
1657 * @dv: driver's major, minor version
1658 * @cd: pointer to command details structure or NULL
1660 * Send the driver version (0x0002) to the firmware
1663 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1664 struct ice_sq_cd *cd)
1666 struct ice_aqc_driver_ver *cmd;
1667 struct ice_aq_desc desc;
1668 u16 len;
1670 cmd = &desc.params.driver_ver;
1672 if (!dv)
1673 return ICE_ERR_PARAM;
1675 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1677 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1678 cmd->major_ver = dv->major_ver;
1679 cmd->minor_ver = dv->minor_ver;
1680 cmd->build_ver = dv->build_ver;
1681 cmd->subbuild_ver = dv->subbuild_ver;
1683 len = 0;
1684 while (len < sizeof(dv->driver_string) &&
1685 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1686 len++;
1688 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1693 * @hw: pointer to the HW struct
1694 * @unloading: is the driver unloading itself
1696 * Tell the Firmware that we're shutting down the AdminQ and whether
1697 * or not the driver is unloading as well (0x0003).
1699 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1701 struct ice_aqc_q_shutdown *cmd;
1702 struct ice_aq_desc desc;
1704 cmd = &desc.params.q_shutdown;
1706 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1708 if (unloading)
1709 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1711 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1716 * @hw: pointer to the HW struct
1718 * @access: access type
1719 * @sdp_number: resource number
1720 * @timeout: the maximum time in ms that the driver may hold the resource
1721 * @cd: pointer to command details structure or NULL
1723 * Requests common resource using the admin queue commands (0x0008).
1724 * When attempting to acquire the Global Config Lock, the driver can
1725 * learn of three states:
1726 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1727 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1728 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1729 * successfully downloaded the package; the driver does
1730 * not have to download the package and can continue
1733 * Note that if the caller is in an acquire lock, perform action, release lock
1734 * phase of operation, it is possible that the FW may detect a timeout and issue
1735 * a CORER. In this case, the driver will receive a CORER interrupt and will
1736 * have to determine its cause. The calling thread that is handling this flow
1737 * will likely get an error propagated back to it indicating the Download
1738 * Package, Update Package or the Release Resource AQ commands timed out.
1740 static enum ice_status
1741 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1742 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1743 struct ice_sq_cd *cd)
1745 struct ice_aqc_req_res *cmd_resp;
1746 struct ice_aq_desc desc;
1747 enum ice_status status;
1749 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1751 cmd_resp = &desc.params.res_owner;
1753 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1755 cmd_resp->res_id = CPU_TO_LE16(res);
1756 cmd_resp->access_type = CPU_TO_LE16(access);
1757 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1758 cmd_resp->timeout = CPU_TO_LE32(*timeout);
1761 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1763 /* The completion specifies the maximum time in ms that the driver
1764 * may hold the resource in the Timeout field.
1767 /* Global config lock response utilizes an additional status field.
1769 * If the Global config lock resource is held by some other driver, the
1770 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1771 * and the timeout field indicates the maximum time the current owner
1772 * of the resource has to free it.
1774 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1775 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1776 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1777 return ICE_SUCCESS;
1778 } else if (LE16_TO_CPU(cmd_resp->status) ==
1779 ICE_AQ_RES_GLBL_IN_PROG) {
1780 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1781 return ICE_ERR_AQ_ERROR;
1782 } else if (LE16_TO_CPU(cmd_resp->status) ==
1783 ICE_AQ_RES_GLBL_DONE) {
1784 return ICE_ERR_AQ_NO_WORK;
1785 }
1787 /* invalid FW response, force a timeout immediately */
1788 *timeout = 0;
1789 return ICE_ERR_AQ_ERROR;
1790 }
1792 /* If the resource is held by some other driver, the command completes
1793 * with a busy return value and the timeout field indicates the maximum
1794 * time the current owner of the resource has to free it.
1796 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1797 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1799 return status;
1800 }
1803 * ice_aq_release_res
1804 * @hw: pointer to the HW struct
1806 * @sdp_number: resource number
1807 * @cd: pointer to command details structure or NULL
1809 * release common resource using the admin queue commands (0x0009)
1811 static enum ice_status
1812 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1813 struct ice_sq_cd *cd)
1815 struct ice_aqc_req_res *cmd;
1816 struct ice_aq_desc desc;
1818 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1820 cmd = &desc.params.res_owner;
1822 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1824 cmd->res_id = CPU_TO_LE16(res);
1825 cmd->res_number = CPU_TO_LE32(sdp_number);
1827 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1832 * @hw: pointer to the HW structure
1834 * @access: access type (read or write)
1835 * @timeout: timeout in milliseconds
1837 * This function will attempt to acquire the ownership of a resource.
1840 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1841 enum ice_aq_res_access_type access, u32 timeout)
1843 #define ICE_RES_POLLING_DELAY_MS 10
1844 u32 delay = ICE_RES_POLLING_DELAY_MS;
1845 u32 time_left = timeout;
1846 enum ice_status status;
1848 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1850 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1852 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1853 * previously acquired the resource and performed any necessary updates;
1854 * in this case the caller does not obtain the resource and has no
1855 * further work to do.
1857 if (status == ICE_ERR_AQ_NO_WORK)
1858 goto ice_acquire_res_exit;
1860 if (status)
1861 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1863 /* If necessary, poll until the current lock owner timeouts */
1864 timeout = time_left;
1865 while (status && timeout && time_left) {
1866 ice_msec_delay(delay, true);
1867 timeout = (timeout > delay) ? timeout - delay : 0;
1868 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1870 if (status == ICE_ERR_AQ_NO_WORK)
1871 /* lock free, but no work to do */
1872 break;
1874 if (!status)
1875 /* lock acquired */
1876 break;
1877 }
1878 if (status && status != ICE_ERR_AQ_NO_WORK)
1879 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1881 ice_acquire_res_exit:
1882 if (status == ICE_ERR_AQ_NO_WORK) {
1883 if (access == ICE_RES_WRITE)
1884 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1885 else
1886 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1887 }
1889 return status;
1890 }
1893 * @hw: pointer to the HW structure
1896 * This function will release a resource using the proper Admin Command.
1898 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1900 enum ice_status status;
1901 u32 total_delay = 0;
1903 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1905 status = ice_aq_release_res(hw, res, 0, NULL);
1907 /* there are some rare cases when trying to release the resource
1908 * results in an admin queue timeout, so handle them correctly
1910 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1911 (total_delay < hw->adminq.sq_cmd_timeout)) {
1912 ice_msec_delay(1, true);
1913 status = ice_aq_release_res(hw, res, 0, NULL);
1914 total_delay++;
1915 }
1916 }
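/* Usage sketch (illustrative only): the acquire/act/release pattern the two
 * helpers above implement, shown with the NVM resource and its usual timeout
 * as an example.
 *
 *	enum ice_status status;
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *				 ICE_NVM_TIMEOUT);
 *	if (status)
 *		return status;
 *	// ... access the shared resource while holding it ...
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */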
1919 * ice_aq_alloc_free_res - command to allocate/free resources
1920 * @hw: pointer to the HW struct
1921 * @num_entries: number of resource entries in buffer
1922 * @buf: Indirect buffer to hold data parameters and response
1923 * @buf_size: size of buffer for indirect commands
1924 * @opc: pass in the command opcode
1925 * @cd: pointer to command details structure or NULL
1927 * Helper function to allocate/free resources using the admin queue commands
1930 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1931 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1932 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1934 struct ice_aqc_alloc_free_res_cmd *cmd;
1935 struct ice_aq_desc desc;
1937 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1939 cmd = &desc.params.sw_res_ctrl;
1941 if (!buf)
1942 return ICE_ERR_PARAM;
1944 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
1945 return ICE_ERR_PARAM;
1947 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1949 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1951 cmd->num_entries = CPU_TO_LE16(num_entries);
1953 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1957 * ice_alloc_hw_res - allocate resource
1958 * @hw: pointer to the HW struct
1959 * @type: type of resource
1960 * @num: number of resources to allocate
1961 * @btm: allocate from bottom
1962 * @res: pointer to array that will receive the resources
1965 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1967 struct ice_aqc_alloc_free_res_elem *buf;
1968 enum ice_status status;
1971 buf_len = ice_struct_size(buf, elem, num);
1972 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1974 return ICE_ERR_NO_MEMORY;
1976 /* Prepare buffer to allocate resource. */
1977 buf->num_elems = CPU_TO_LE16(num);
1978 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1979 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1981 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1983 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1984 ice_aqc_opc_alloc_res, NULL);
1986 goto ice_alloc_res_exit;
1988 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
1989 ICE_NONDMA_TO_NONDMA);
1997 * ice_free_hw_res - free allocated HW resource
1998 * @hw: pointer to the HW struct
1999 * @type: type of resource to free
2000 * @num: number of resources
2001 * @res: pointer to array that contains the resources to free
2003 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2005 struct ice_aqc_alloc_free_res_elem *buf;
2006 enum ice_status status;
2009 buf_len = ice_struct_size(buf, elem, num);
2010 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2012 return ICE_ERR_NO_MEMORY;
2014 /* Prepare buffer to free resource. */
2015 buf->num_elems = CPU_TO_LE16(num);
2016 buf->res_type = CPU_TO_LE16(type);
2017 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
2018 ICE_NONDMA_TO_NONDMA);
2020 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
2021 ice_aqc_opc_free_res, NULL);
2023 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
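/* Illustrative sketch (not part of the driver): allocating and later freeing
 * a small block of resources with the helpers above. EXAMPLE_RES_TYPE is a
 * placeholder; a real caller passes one of the ICE_AQC_RES_TYPE_* values.
 *
 *	u16 ids[2];
 *	enum ice_status status;
 *
 *	status = ice_alloc_hw_res(hw, EXAMPLE_RES_TYPE, 2, false, ids);
 *	if (status)
 *		return status;
 *
 *	// ... use ids[0] and ids[1] ...
 *
 *	status = ice_free_hw_res(hw, EXAMPLE_RES_TYPE, 2, ids);
 */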
2030 * ice_get_num_per_func - determine number of resources per PF
2031 * @hw: pointer to the HW structure
2032 * @max: value to be evenly split between each PF
2034 * Determine the number of valid functions by going through the bitmap returned
2035 * from parsing capabilities and use this to calculate the number of resources
2036 * per PF based on the max value passed in.
2038 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2042 #define ICE_CAPS_VALID_FUNCS_M 0xFF
2043 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
2044 ICE_CAPS_VALID_FUNCS_M);
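/* Worked example (illustrative): if the valid_functions bitmap reads 0x0F,
 * ice_hweight8() counts four active PFs, so a device-wide maximum of 768
 * shared resources yields 768 / 4 = 192 resources per PF.
 */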
2053 * ice_parse_common_caps - parse common device/function capabilities
2054 * @hw: pointer to the HW struct
2055 * @caps: pointer to common capabilities structure
2056 * @elem: the capability element to parse
2057 * @prefix: message prefix for tracing capabilities
2059 * Given a capability element, extract relevant details into the common
2060 * capability structure.
2062 * Returns: true if the capability matches one of the common capability ids, false otherwise.
2066 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2067 struct ice_aqc_list_caps_elem *elem, const char *prefix)
2069 u32 logical_id = LE32_TO_CPU(elem->logical_id);
2070 u32 phys_id = LE32_TO_CPU(elem->phys_id);
2071 u32 number = LE32_TO_CPU(elem->number);
2072 u16 cap = LE16_TO_CPU(elem->cap);
2076 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2077 caps->valid_functions = number;
2078 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2079 caps->valid_functions);
2081 case ICE_AQC_CAPS_DCB:
2082 caps->dcb = (number == 1);
2083 caps->active_tc_bitmap = logical_id;
2084 caps->maxtc = phys_id;
2085 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2086 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2087 caps->active_tc_bitmap);
2088 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2090 case ICE_AQC_CAPS_RSS:
2091 caps->rss_table_size = number;
2092 caps->rss_table_entry_width = logical_id;
2093 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2094 caps->rss_table_size);
2095 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2096 caps->rss_table_entry_width);
2098 case ICE_AQC_CAPS_RXQS:
2099 caps->num_rxq = number;
2100 caps->rxq_first_id = phys_id;
2101 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2103 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2104 caps->rxq_first_id);
2106 case ICE_AQC_CAPS_TXQS:
2107 caps->num_txq = number;
2108 caps->txq_first_id = phys_id;
2109 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2111 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2112 caps->txq_first_id);
2114 case ICE_AQC_CAPS_MSIX:
2115 caps->num_msix_vectors = number;
2116 caps->msix_vector_first_id = phys_id;
2117 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2118 caps->num_msix_vectors);
2119 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2120 caps->msix_vector_first_id);
2122 case ICE_AQC_CAPS_NVM_MGMT:
2123 caps->sec_rev_disabled =
2124 (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
2126 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2127 caps->sec_rev_disabled);
2128 caps->update_disabled =
2129 (number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
2131 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2132 caps->update_disabled);
2133 caps->nvm_unified_update =
2134 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2136 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2137 caps->nvm_unified_update);
2139 case ICE_AQC_CAPS_MAX_MTU:
2140 caps->max_mtu = number;
2141 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2142 prefix, caps->max_mtu);
2144 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
2145 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
2146 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
2147 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
2149 u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
2151 caps->ext_topo_dev_img_ver_high[index] = number;
2152 caps->ext_topo_dev_img_ver_low[index] = logical_id;
2153 caps->ext_topo_dev_img_part_num[index] =
2154 (phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
2155 ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
2156 caps->ext_topo_dev_img_load_en[index] =
2157 (phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
2158 caps->ext_topo_dev_img_prog_en[index] =
2159 (phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
2160 ice_debug(hw, ICE_DBG_INIT,
2161 "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
2163 caps->ext_topo_dev_img_ver_high[index]);
2164 ice_debug(hw, ICE_DBG_INIT,
2165 "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
2167 caps->ext_topo_dev_img_ver_low[index]);
2168 ice_debug(hw, ICE_DBG_INIT,
2169 "%s: ext_topo_dev_img_part_num[%d] = %d\n",
2171 caps->ext_topo_dev_img_part_num[index]);
2172 ice_debug(hw, ICE_DBG_INIT,
2173 "%s: ext_topo_dev_img_load_en[%d] = %d\n",
2175 caps->ext_topo_dev_img_load_en[index]);
2176 ice_debug(hw, ICE_DBG_INIT,
2177 "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
2179 caps->ext_topo_dev_img_prog_en[index]);
2183 /* Not one of the recognized common capabilities */
2191 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2192 * @hw: pointer to the HW structure
2193 * @caps: pointer to capabilities structure to fix
2195 * Re-calculate the capabilities that are dependent on the number of physical
2196 * ports; i.e. some features are not supported or function differently on
2197 * devices with more than 4 ports.
2200 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2202 /* This assumes device capabilities are always scanned before function
2203 * capabilities during the initialization flow.
2205 if (hw->dev_caps.num_funcs > 4) {
2206 /* Max 4 TCs per port */
2208 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2214 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2215 * @hw: pointer to the HW struct
2216 * @func_p: pointer to function capabilities structure
2217 * @cap: pointer to the capability element to parse
2219 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2222 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2223 struct ice_aqc_list_caps_elem *cap)
2225 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2226 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2227 LE32_TO_CPU(cap->number));
2228 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2229 func_p->guar_num_vsi);
2233 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2234 * @hw: pointer to the HW struct
2235 * @func_p: pointer to function capabilities structure
2236 * @cap: pointer to the capability element to parse
2238 * Extract function capabilities for ICE_AQC_CAPS_1588.
2241 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2242 struct ice_aqc_list_caps_elem *cap)
2244 struct ice_ts_func_info *info = &func_p->ts_func_info;
2245 u32 number = LE32_TO_CPU(cap->number);
2247 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2248 func_p->common_cap.ieee_1588 = info->ena;
2250 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2251 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2252 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2253 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2255 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
2256 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2258 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
2259 info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
2261 /* Unknown clock frequency, so assume a (probably incorrect)
2262 * default to avoid out-of-bounds lookups of frequency-related
2263 * information.
2265 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2267 info->time_ref = ICE_TIME_REF_FREQ_25_000;
2270 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2271 func_p->common_cap.ieee_1588);
2272 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2273 info->src_tmr_owned);
2274 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2276 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2277 info->tmr_index_owned);
2278 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2279 info->tmr_index_assoc);
2280 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2282 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2287 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2288 * @hw: pointer to the HW struct
2289 * @func_p: pointer to function capabilities structure
2291 * Extract function capabilities for ICE_AQC_CAPS_FD.
2294 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2298 if (hw->dcf_enabled)
2300 reg_val = rd32(hw, GLQF_FD_SIZE);
2301 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2302 GLQF_FD_SIZE_FD_GSIZE_S;
2303 func_p->fd_fltr_guar =
2304 ice_get_num_per_func(hw, val);
2305 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2306 GLQF_FD_SIZE_FD_BSIZE_S;
2307 func_p->fd_fltr_best_effort = val;
2309 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2310 func_p->fd_fltr_guar);
2311 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2312 func_p->fd_fltr_best_effort);
2316 * ice_parse_func_caps - Parse function capabilities
2317 * @hw: pointer to the HW struct
2318 * @func_p: pointer to function capabilities structure
2319 * @buf: buffer containing the function capability records
2320 * @cap_count: the number of capabilities
2322 * Helper function to parse function (0x000A) capabilities list. For
2323 * capabilities shared between device and function, this relies on
2324 * ice_parse_common_caps.
2326 * Loop through the list of provided capabilities and extract the relevant
2327 * data into the function capabilities structure.
2330 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2331 void *buf, u32 cap_count)
2333 struct ice_aqc_list_caps_elem *cap_resp;
2336 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2338 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2340 for (i = 0; i < cap_count; i++) {
2341 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2344 found = ice_parse_common_caps(hw, &func_p->common_cap,
2345 &cap_resp[i], "func caps");
2348 case ICE_AQC_CAPS_VSI:
2349 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2351 case ICE_AQC_CAPS_1588:
2352 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2354 case ICE_AQC_CAPS_FD:
2355 ice_parse_fdir_func_caps(hw, func_p);
2358 /* Don't list common capabilities as unknown */
2360 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2366 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2370 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2371 * @hw: pointer to the HW struct
2372 * @dev_p: pointer to device capabilities structure
2373 * @cap: capability element to parse
2375 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2378 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2379 struct ice_aqc_list_caps_elem *cap)
2381 u32 number = LE32_TO_CPU(cap->number);
2383 dev_p->num_funcs = ice_hweight32(number);
2384 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2389 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2390 * @hw: pointer to the HW struct
2391 * @dev_p: pointer to device capabilities structure
2392 * @cap: capability element to parse
2394 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2397 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2398 struct ice_aqc_list_caps_elem *cap)
2400 u32 number = LE32_TO_CPU(cap->number);
2402 dev_p->num_vsi_allocd_to_host = number;
2403 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2404 dev_p->num_vsi_allocd_to_host);
2408 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2409 * @hw: pointer to the HW struct
2410 * @dev_p: pointer to device capabilities structure
2411 * @cap: capability element to parse
2413 * Parse ICE_AQC_CAPS_1588 for device capabilities.
2416 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2417 struct ice_aqc_list_caps_elem *cap)
2419 struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2420 u32 logical_id = LE32_TO_CPU(cap->logical_id);
2421 u32 phys_id = LE32_TO_CPU(cap->phys_id);
2422 u32 number = LE32_TO_CPU(cap->number);
2424 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2425 dev_p->common_cap.ieee_1588 = info->ena;
2427 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2428 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2429 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2431 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2432 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2433 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2435 info->ena_ports = logical_id;
2436 info->tmr_own_map = phys_id;
2438 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2439 dev_p->common_cap.ieee_1588);
2440 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2442 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2444 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2446 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2448 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2450 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2452 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2454 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2459 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2460 * @hw: pointer to the HW struct
2461 * @dev_p: pointer to device capabilities structure
2462 * @cap: capability element to parse
2464 * Parse ICE_AQC_CAPS_FD for device capabilities.
2467 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2468 struct ice_aqc_list_caps_elem *cap)
2470 u32 number = LE32_TO_CPU(cap->number);
2472 dev_p->num_flow_director_fltr = number;
2473 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2474 dev_p->num_flow_director_fltr);
2478 * ice_parse_dev_caps - Parse device capabilities
2479 * @hw: pointer to the HW struct
2480 * @dev_p: pointer to device capabilities structure
2481 * @buf: buffer containing the device capability records
2482 * @cap_count: the number of capabilities
2484 * Helper function to parse the device (0x000B) capabilities list. For
2485 * capabilities shared between device and function, this relies on
2486 * ice_parse_common_caps.
2488 * Loop through the list of provided capabilities and extract the relevant
2489 * data into the device capabilities structure.
2492 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2493 void *buf, u32 cap_count)
2495 struct ice_aqc_list_caps_elem *cap_resp;
2498 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2500 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2502 for (i = 0; i < cap_count; i++) {
2503 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2506 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2507 &cap_resp[i], "dev caps");
2510 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2511 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2513 case ICE_AQC_CAPS_VSI:
2514 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2516 case ICE_AQC_CAPS_1588:
2517 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2519 case ICE_AQC_CAPS_FD:
2520 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2523 /* Don't list common capabilities as unknown */
2525 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2531 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2535 * ice_aq_list_caps - query function/device capabilities
2536 * @hw: pointer to the HW struct
2537 * @buf: a buffer to hold the capabilities
2538 * @buf_size: size of the buffer
2539 * @cap_count: if not NULL, set to the number of capabilities reported
2540 * @opc: capabilities type to discover, device or function
2541 * @cd: pointer to command details structure or NULL
2543 * Get the function (0x000A) or device (0x000B) capabilities description from
2544 * firmware and store it in the buffer.
2546 * If the cap_count pointer is not NULL, then it is set to the number of
2547 * capabilities firmware will report. Note that if the buffer size is too
2548 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2549 * cap_count will still be updated in this case. It is recommended that the
2550 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2551 * firmware could return) to avoid this.
2553 static enum ice_status
2554 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2555 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2557 struct ice_aqc_list_caps *cmd;
2558 struct ice_aq_desc desc;
2559 enum ice_status status;
2561 cmd = &desc.params.get_cap;
2563 if (opc != ice_aqc_opc_list_func_caps &&
2564 opc != ice_aqc_opc_list_dev_caps)
2565 return ICE_ERR_PARAM;
2567 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2568 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2571 *cap_count = LE32_TO_CPU(cmd->count);
2577 * ice_discover_dev_caps - Read and extract device capabilities
2578 * @hw: pointer to the hardware structure
2579 * @dev_caps: pointer to device capabilities structure
2581 * Read the device capabilities and extract them into the dev_caps structure
2584 static enum ice_status
2585 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2587 enum ice_status status;
2591 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2593 return ICE_ERR_NO_MEMORY;
2595 /* Although the driver doesn't know the number of capabilities the
2596 * device will return, we can simply send a 4KB buffer, the maximum
2597 * possible size that firmware can return.
2599 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2601 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2602 ice_aqc_opc_list_dev_caps, NULL);
2604 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2611 * ice_discover_func_caps - Read and extract function capabilities
2612 * @hw: pointer to the hardware structure
2613 * @func_caps: pointer to function capabilities structure
2615 * Read the function capabilities and extract them into the func_caps structure
2618 static enum ice_status
2619 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2621 enum ice_status status;
2625 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2627 return ICE_ERR_NO_MEMORY;
2629 /* Although the driver doesn't know the number of capabilities the
2630 * device will return, we can simply send a 4KB buffer, the maximum
2631 * possible size that firmware can return.
2633 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2635 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2636 ice_aqc_opc_list_func_caps, NULL);
2638 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2645 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2646 * @hw: pointer to the hardware structure
2648 void ice_set_safe_mode_caps(struct ice_hw *hw)
2650 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2651 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2652 struct ice_hw_common_caps cached_caps;
2655 /* cache some func_caps values that should be restored after memset */
2656 cached_caps = func_caps->common_cap;
2658 /* unset func capabilities */
2659 memset(func_caps, 0, sizeof(*func_caps));
2661 #define ICE_RESTORE_FUNC_CAP(name) \
2662 func_caps->common_cap.name = cached_caps.name
2664 /* restore cached values */
2665 ICE_RESTORE_FUNC_CAP(valid_functions);
2666 ICE_RESTORE_FUNC_CAP(txq_first_id);
2667 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2668 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2669 ICE_RESTORE_FUNC_CAP(max_mtu);
2670 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2672 /* one Tx and one Rx queue in safe mode */
2673 func_caps->common_cap.num_rxq = 1;
2674 func_caps->common_cap.num_txq = 1;
2676 /* two MSIX vectors, one for traffic and one for misc causes */
2677 func_caps->common_cap.num_msix_vectors = 2;
2678 func_caps->guar_num_vsi = 1;
2680 /* cache some dev_caps values that should be restored after memset */
2681 cached_caps = dev_caps->common_cap;
2682 num_funcs = dev_caps->num_funcs;
2684 /* unset dev capabilities */
2685 memset(dev_caps, 0, sizeof(*dev_caps));
2687 #define ICE_RESTORE_DEV_CAP(name) \
2688 dev_caps->common_cap.name = cached_caps.name
2690 /* restore cached values */
2691 ICE_RESTORE_DEV_CAP(valid_functions);
2692 ICE_RESTORE_DEV_CAP(txq_first_id);
2693 ICE_RESTORE_DEV_CAP(rxq_first_id);
2694 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2695 ICE_RESTORE_DEV_CAP(max_mtu);
2696 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2697 dev_caps->num_funcs = num_funcs;
2699 /* one Tx and one Rx queue per function in safe mode */
2700 dev_caps->common_cap.num_rxq = num_funcs;
2701 dev_caps->common_cap.num_txq = num_funcs;
2703 /* two MSIX vectors per function */
2704 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2708 * ice_get_caps - get info about the HW
2709 * @hw: pointer to the hardware structure
2711 enum ice_status ice_get_caps(struct ice_hw *hw)
2713 enum ice_status status;
2715 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2719 return ice_discover_func_caps(hw, &hw->func_caps);
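/* Illustrative usage sketch (not part of the driver): once ice_get_caps()
 * succeeds, the parsed results are available in hw->dev_caps and
 * hw->func_caps; the fields below are members populated by the parsers above.
 *
 *	enum ice_status status;
 *
 *	status = ice_get_caps(hw);
 *	if (status)
 *		return status;
 *
 *	ice_debug(hw, ICE_DBG_INIT, "PFs %d, guar VSIs %d, max MTU %d\n",
 *		  hw->dev_caps.num_funcs, hw->func_caps.guar_num_vsi,
 *		  hw->func_caps.common_cap.max_mtu);
 */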
2723 * ice_aq_manage_mac_write - manage MAC address write command
2724 * @hw: pointer to the HW struct
2725 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2726 * @flags: flags to control write behavior
2727 * @cd: pointer to command details structure or NULL
2729 * This function is used to write MAC address to the NVM (0x0108).
2732 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2733 struct ice_sq_cd *cd)
2735 struct ice_aqc_manage_mac_write *cmd;
2736 struct ice_aq_desc desc;
2738 cmd = &desc.params.mac_write;
2739 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2742 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
2744 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2748 * ice_aq_clear_pxe_mode
2749 * @hw: pointer to the HW struct
2751 * Tell the firmware that the driver is taking over from PXE (0x0110).
2753 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2755 struct ice_aq_desc desc;
2757 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2758 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2760 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2764 * ice_clear_pxe_mode - clear pxe operations mode
2765 * @hw: pointer to the HW struct
2767 * Make sure all PXE mode settings are cleared, including things
2768 * like descriptor fetch/write-back mode.
2770 void ice_clear_pxe_mode(struct ice_hw *hw)
2772 if (ice_check_sq_alive(hw, &hw->adminq))
2773 ice_aq_clear_pxe_mode(hw);
2777 * ice_aq_set_port_params - set physical port parameters.
2778 * @pi: pointer to the port info struct
2779 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2780 * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
2781 * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
2782 * @double_vlan: if set double VLAN is enabled
2783 * @cd: pointer to command details structure or NULL
2785 * Set Physical port parameters (0x0203)
2788 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2789 bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2790 struct ice_sq_cd *cd)
2793 struct ice_aqc_set_port_params *cmd;
2794 struct ice_hw *hw = pi->hw;
2795 struct ice_aq_desc desc;
2798 cmd = &desc.params.set_port_params;
2800 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2801 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
2803 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
2805 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
2807 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2808 cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
2810 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2814 * ice_get_link_speed_based_on_phy_type - returns link speed
2815 * @phy_type_low: lower part of phy_type
2816 * @phy_type_high: higher part of phy_type
2818 * This helper function will convert an entry in PHY type structure
2819 * [phy_type_low, phy_type_high] to its corresponding link speed.
2820 * Note: In the structure of [phy_type_low, phy_type_high], there should
2821 * be one bit set, as this function will convert one PHY type to its speed.
2823 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2824 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2827 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2829 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2830 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2832 switch (phy_type_low) {
2833 case ICE_PHY_TYPE_LOW_100BASE_TX:
2834 case ICE_PHY_TYPE_LOW_100M_SGMII:
2835 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2837 case ICE_PHY_TYPE_LOW_1000BASE_T:
2838 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2839 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2840 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2841 case ICE_PHY_TYPE_LOW_1G_SGMII:
2842 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2844 case ICE_PHY_TYPE_LOW_2500BASE_T:
2845 case ICE_PHY_TYPE_LOW_2500BASE_X:
2846 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2847 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2849 case ICE_PHY_TYPE_LOW_5GBASE_T:
2850 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2851 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2853 case ICE_PHY_TYPE_LOW_10GBASE_T:
2854 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2855 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2856 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2857 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2858 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2859 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2860 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2862 case ICE_PHY_TYPE_LOW_25GBASE_T:
2863 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2864 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2865 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2866 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2867 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2868 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2869 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2870 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2871 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2872 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2873 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2875 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2876 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2877 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2878 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2879 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2880 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2881 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2883 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2884 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2885 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2886 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2887 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2888 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2889 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2890 case ICE_PHY_TYPE_LOW_50G_AUI2:
2891 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2892 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2893 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2894 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2895 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2896 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2897 case ICE_PHY_TYPE_LOW_50G_AUI1:
2898 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2900 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2901 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2902 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2903 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2904 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2905 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2906 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2907 case ICE_PHY_TYPE_LOW_100G_AUI4:
2908 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2909 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2910 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2911 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2912 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2913 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2916 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2920 switch (phy_type_high) {
2921 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2922 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2923 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2924 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2925 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2926 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2929 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2933 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2934 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2935 return ICE_AQ_LINK_SPEED_UNKNOWN;
2936 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2937 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2938 return ICE_AQ_LINK_SPEED_UNKNOWN;
2939 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2940 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2941 return speed_phy_type_low;
2943 return speed_phy_type_high;
2947 * ice_update_phy_type
2948 * @phy_type_low: pointer to the lower part of phy_type
2949 * @phy_type_high: pointer to the higher part of phy_type
2950 * @link_speeds_bitmap: targeted link speeds bitmap
2952 * Note: For the layout of link_speeds_bitmap, see
2953 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2954 * link_speeds_bitmap that includes multiple speeds.
2956 * Each entry in this [phy_type_low, phy_type_high] structure will
2957 * represent a certain link speed. This helper function will turn on bits
2958 * in [phy_type_low, phy_type_high] structure based on the value of
2959 * link_speeds_bitmap input parameter.
2962 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2963 u16 link_speeds_bitmap)
2970 /* We first check with low part of phy_type */
2971 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2972 pt_low = BIT_ULL(index);
2973 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2975 if (link_speeds_bitmap & speed)
2976 *phy_type_low |= BIT_ULL(index);
2979 /* We then check with high part of phy_type */
2980 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2981 pt_high = BIT_ULL(index);
2982 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2984 if (link_speeds_bitmap & speed)
2985 *phy_type_high |= BIT_ULL(index);
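/* Illustrative sketch (not part of the driver): requesting 10G and 25G link
 * speeds by translating the speed bitmap into PHY type bits. The result is
 * typically copied into a set-PHY-config request.
 *
 *	u64 phy_type_low = 0, phy_type_high = 0;
 *
 *	ice_update_phy_type(&phy_type_low, &phy_type_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	// every 10G and 25G bit in phy_type_low/phy_type_high is now set
 */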
2990 * ice_aq_set_phy_cfg
2991 * @hw: pointer to the HW struct
2992 * @pi: port info structure of the interested logical port
2993 * @cfg: structure with PHY configuration data to be set
2994 * @cd: pointer to command details structure or NULL
2996 * Set the various PHY configuration parameters supported on the Port.
2997 * One or more of the Set PHY config parameters may be ignored in an MFP
2998 * mode as the PF may not have the privilege to set some of the PHY Config
2999 * parameters. This status will be indicated by the command response (0x0601).
3002 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3003 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3005 struct ice_aq_desc desc;
3006 enum ice_status status;
3009 return ICE_ERR_PARAM;
3011 /* Ensure that only valid bits of cfg->caps can be turned on. */
3012 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3013 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3016 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3019 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3020 desc.params.set_phy.lport_num = pi->lport;
3021 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3023 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3024 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
3025 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
3026 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
3027 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
3028 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
3029 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
3030 cfg->low_power_ctrl_an);
3031 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
3032 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
3033 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
3036 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3038 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3039 status = ICE_SUCCESS;
3042 pi->phy.curr_user_phy_cfg = *cfg;
3048 * ice_update_link_info - update status of the HW network link
3049 * @pi: port info structure of the interested logical port
3051 enum ice_status ice_update_link_info(struct ice_port_info *pi)
3053 struct ice_link_status *li;
3054 enum ice_status status;
3057 return ICE_ERR_PARAM;
3059 li = &pi->phy.link_info;
3061 status = ice_aq_get_link_info(pi, true, NULL, NULL);
3065 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3066 struct ice_aqc_get_phy_caps_data *pcaps;
3070 pcaps = (struct ice_aqc_get_phy_caps_data *)
3071 ice_malloc(hw, sizeof(*pcaps));
3073 return ICE_ERR_NO_MEMORY;
3075 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3078 if (status == ICE_SUCCESS)
3079 ice_memcpy(li->module_type, &pcaps->module_type,
3080 sizeof(li->module_type),
3081 ICE_NONDMA_TO_NONDMA);
3083 ice_free(hw, pcaps);
3090 * ice_cache_phy_user_req
3091 * @pi: port information structure
3092 * @cache_data: PHY logging data
3093 * @cache_mode: PHY logging mode
3095 * Log the user request on (FC, FEC, SPEED) for later use.
3098 ice_cache_phy_user_req(struct ice_port_info *pi,
3099 struct ice_phy_cache_mode_data cache_data,
3100 enum ice_phy_cache_mode cache_mode)
3105 switch (cache_mode) {
3107 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3109 case ICE_SPEED_MODE:
3110 pi->phy.curr_user_speed_req =
3111 cache_data.data.curr_user_speed_req;
3114 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3122 * ice_caps_to_fc_mode
3123 * @caps: PHY capabilities
3125 * Convert PHY FC capabilities to ice FC mode
3127 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3129 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3130 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3133 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3134 return ICE_FC_TX_PAUSE;
3136 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3137 return ICE_FC_RX_PAUSE;
3143 * ice_caps_to_fec_mode
3144 * @caps: PHY capabilities
3145 * @fec_options: Link FEC options
3147 * Convert PHY FEC capabilities to ice FEC mode
3149 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3151 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3152 return ICE_FEC_AUTO;
3154 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3155 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3156 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3157 ICE_AQC_PHY_FEC_25G_KR_REQ))
3158 return ICE_FEC_BASER;
3160 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3161 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3162 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3165 return ICE_FEC_NONE;
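/* Illustrative sketch (not part of the driver): converting the raw fields of
 * a get-PHY-caps response into the driver's FC and FEC enums. "pcaps" is
 * assumed to hold a successful ice_aq_get_phy_caps() response.
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
 *						     pcaps->link_fec_options);
 */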
3169 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3170 * @pi: port information structure
3171 * @cfg: PHY configuration data to set FC mode
3172 * @req_mode: FC mode to configure
3174 static enum ice_status
3175 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3176 enum ice_fc_mode req_mode)
3178 struct ice_phy_cache_mode_data cache_data;
3179 u8 pause_mask = 0x0;
3182 return ICE_ERR_BAD_PTR;
3187 struct ice_aqc_get_phy_caps_data *pcaps;
3188 enum ice_status status;
3190 pcaps = (struct ice_aqc_get_phy_caps_data *)
3191 ice_malloc(pi->hw, sizeof(*pcaps));
3193 return ICE_ERR_NO_MEMORY;
3195 /* Query the value of FC that both the NIC and the attached media can support. */
3198 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3201 ice_free(pi->hw, pcaps);
3205 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3206 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3208 ice_free(pi->hw, pcaps);
3212 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3213 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3215 case ICE_FC_RX_PAUSE:
3216 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3218 case ICE_FC_TX_PAUSE:
3219 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3225 /* clear the old pause settings */
3226 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3227 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3229 /* set the new capabilities */
3230 cfg->caps |= pause_mask;
3232 /* Cache user FC request */
3233 cache_data.data.curr_user_fc_req = req_mode;
3234 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3241 * @pi: port information structure
3242 * @aq_failures: pointer to status code, specific to ice_set_fc routine
3243 * @ena_auto_link_update: enable automatic link update
3245 * Set the requested flow control mode.
3248 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3250 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3251 struct ice_aqc_get_phy_caps_data *pcaps;
3252 enum ice_status status;
3255 if (!pi || !aq_failures)
3256 return ICE_ERR_BAD_PTR;
3261 pcaps = (struct ice_aqc_get_phy_caps_data *)
3262 ice_malloc(hw, sizeof(*pcaps));
3264 return ICE_ERR_NO_MEMORY;
3266 /* Get the current PHY config */
3267 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3271 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3275 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3277 /* Configure the set PHY data */
3278 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3280 if (status != ICE_ERR_BAD_PTR)
3281 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3286 /* If the capabilities have changed, then set the new config */
3287 if (cfg.caps != pcaps->caps) {
3288 int retry_count, retry_max = 10;
3290 /* Auto restart link so settings take effect */
3291 if (ena_auto_link_update)
3292 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3294 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3296 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3300 /* Update the link info
3301 * It sometimes takes a really long time for link to
3302 * come back from the atomic reset. Thus, we wait a little while. */
3305 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3306 status = ice_update_link_info(pi);
3308 if (status == ICE_SUCCESS)
3311 ice_msec_delay(100, true);
3315 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3319 ice_free(hw, pcaps);
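/* Illustrative usage sketch (not part of the driver): requesting symmetric
 * flow control on a port. ICE_FC_FULL is assumed to be the full-pause member
 * of enum ice_fc_mode; the caller places the desired mode in pi->fc.req_mode
 * before calling ice_set_fc().
 *
 *	u8 aq_failures = 0;
 *	enum ice_status status;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "set FC failed, aq_failures %d\n",
 *			  aq_failures);
 */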
3324 * ice_phy_caps_equals_cfg
3325 * @phy_caps: PHY capabilities
3326 * @phy_cfg: PHY configuration
3328 * Helper function to determine if the PHY capabilities match the PHY configuration.
3332 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3333 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3335 u8 caps_mask, cfg_mask;
3337 if (!phy_caps || !phy_cfg)
3340 /* These bits are not common between capabilities and configuration.
3341 * Do not use them to determine equality.
3343 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3344 ICE_AQC_PHY_EN_MOD_QUAL);
3345 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3347 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3348 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3349 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3350 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3351 phy_caps->eee_cap != phy_cfg->eee_cap ||
3352 phy_caps->eeer_value != phy_cfg->eeer_value ||
3353 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3360 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3361 * @pi: port information structure
3362 * @caps: PHY ability structure to copy data from
3363 * @cfg: PHY configuration structure to copy data to
3365 * Helper function to copy AQC PHY get ability data to PHY set configuration data.
3369 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3370 struct ice_aqc_get_phy_caps_data *caps,
3371 struct ice_aqc_set_phy_cfg_data *cfg)
3373 if (!pi || !caps || !cfg)
3376 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3377 cfg->phy_type_low = caps->phy_type_low;
3378 cfg->phy_type_high = caps->phy_type_high;
3379 cfg->caps = caps->caps;
3380 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3381 cfg->eee_cap = caps->eee_cap;
3382 cfg->eeer_value = caps->eeer_value;
3383 cfg->link_fec_opt = caps->link_fec_options;
3384 cfg->module_compliance_enforcement =
3385 caps->module_compliance_enforcement;
3389 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3390 * @pi: port information structure
3391 * @cfg: PHY configuration data to set FEC mode
3392 * @fec: FEC mode to configure
3395 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3396 enum ice_fec_mode fec)
3398 struct ice_aqc_get_phy_caps_data *pcaps;
3399 enum ice_status status = ICE_SUCCESS;
3403 return ICE_ERR_BAD_PTR;
3407 pcaps = (struct ice_aqc_get_phy_caps_data *)
3408 ice_malloc(hw, sizeof(*pcaps));
3410 return ICE_ERR_NO_MEMORY;
3412 status = ice_aq_get_phy_caps(pi, false,
3413 (ice_fw_supports_report_dflt_cfg(hw) ?
3414 ICE_AQC_REPORT_DFLT_CFG :
3415 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3420 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3421 cfg->link_fec_opt = pcaps->link_fec_options;
3425 /* Clear the RS bits, AND in the BASE-R ability
3426 * bits, and OR in the request bits.
3428 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3429 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3430 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3431 ICE_AQC_PHY_FEC_25G_KR_REQ;
3434 /* Clear the BASE-R bits, AND in the RS ability
3435 * bits, and OR in the request bits.
3437 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3438 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3439 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3442 /* Clear all FEC option bits. */
3443 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3446 /* AND auto FEC bit, and all caps bits. */
3447 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3448 cfg->link_fec_opt |= pcaps->link_fec_options;
3451 status = ICE_ERR_PARAM;
3455 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
3456 !ice_fw_supports_report_dflt_cfg(pi->hw)) {
3457 struct ice_link_default_override_tlv tlv;
3459 if (ice_get_link_default_override(&tlv, pi))
3462 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3463 (tlv.options & ICE_LINK_OVERRIDE_EN))
3464 cfg->link_fec_opt = tlv.fec_options;
3468 ice_free(hw, pcaps);
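/* Illustrative sketch (not part of the driver): one possible flow for
 * requesting RS-FEC on a port, built only from helpers in this file. Error
 * handling is reduced to early returns; pcaps is kept on the stack for
 * brevity.
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *	enum ice_status status;
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				     &pcaps, NULL);
 *	if (status)
 *		return status;
 *
 *	ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);
 *	status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
 *	if (status)
 *		return status;
 *
 *	return ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */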
3474 * ice_get_link_status - get status of the HW network link
3475 * @pi: port information structure
3476 * @link_up: pointer to bool (true/false = linkup/linkdown)
3478 * Variable link_up is true if link is up, false if link is down.
3479 * The variable link_up is invalid if status is non-zero. As a
3480 * result of this call, link status reporting becomes enabled.
3482 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3484 struct ice_phy_info *phy_info;
3485 enum ice_status status = ICE_SUCCESS;
3487 if (!pi || !link_up)
3488 return ICE_ERR_PARAM;
3490 phy_info = &pi->phy;
3492 if (phy_info->get_link_info) {
3493 status = ice_update_link_info(pi);
3496 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3500 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
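/* Illustrative usage sketch (not part of the driver):
 *
 *	bool link_up = false;
 *
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "port %d link is up\n",
 *			  pi->lport);
 */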
3506 * ice_aq_set_link_restart_an
3507 * @pi: pointer to the port information structure
3508 * @ena_link: if true: enable link, if false: disable link
3509 * @cd: pointer to command details structure or NULL
3511 * Sets up the link and restarts the Auto-Negotiation over the link.
3514 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3515 struct ice_sq_cd *cd)
3517 struct ice_aqc_restart_an *cmd;
3518 struct ice_aq_desc desc;
3520 cmd = &desc.params.restart_an;
3522 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3524 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3525 cmd->lport_num = pi->lport;
3527 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3529 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3531 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3535 * ice_aq_set_event_mask
3536 * @hw: pointer to the HW struct
3537 * @port_num: port number of the physical function
3538 * @mask: event mask to be set
3539 * @cd: pointer to command details structure or NULL
3541 * Set event mask (0x0613)
3544 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3545 struct ice_sq_cd *cd)
3547 struct ice_aqc_set_event_mask *cmd;
3548 struct ice_aq_desc desc;
3550 cmd = &desc.params.set_event_mask;
3552 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3554 cmd->lport_num = port_num;
3556 cmd->event_mask = CPU_TO_LE16(mask);
3557 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3561 * ice_aq_set_mac_loopback
3562 * @hw: pointer to the HW struct
3563 * @ena_lpbk: Enable or Disable loopback
3564 * @cd: pointer to command details structure or NULL
3566 * Enable/disable loopback on a given port
3569 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3571 struct ice_aqc_set_mac_lb *cmd;
3572 struct ice_aq_desc desc;
3574 cmd = &desc.params.set_mac_lb;
3576 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3578 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3580 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3584 * ice_aq_set_port_id_led
3585 * @pi: pointer to the port information
3586 * @is_orig_mode: is this LED set to original mode (by the net-list)
3587 * @cd: pointer to command details structure or NULL
3589 * Set LED value for the given port (0x06e9)
3592 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3593 struct ice_sq_cd *cd)
3595 struct ice_aqc_set_port_id_led *cmd;
3596 struct ice_hw *hw = pi->hw;
3597 struct ice_aq_desc desc;
3599 cmd = &desc.params.set_port_id_led;
3601 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3604 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3606 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3608 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3613 * @hw: pointer to the HW struct
3614 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3615 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3616 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
3618 * @set_page: set or ignore the page
3619 * @data: pointer to data buffer to be read/written to the I2C device.
3620 * @length: 1-16 for read, 1 for write.
3621 * @write: 0 for read, 1 for write.
3622 * @cd: pointer to command details structure or NULL
3624 * Read/Write SFF EEPROM (0x06EE)
3627 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3628 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3629 bool write, struct ice_sq_cd *cd)
3631 struct ice_aqc_sff_eeprom *cmd;
3632 struct ice_aq_desc desc;
3633 enum ice_status status;
3635 if (!data || (mem_addr & 0xff00))
3636 return ICE_ERR_PARAM;
3638 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3639 cmd = &desc.params.read_write_sff_param;
3640 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3641 cmd->lport_num = (u8)(lport & 0xff);
3642 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3643 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3644 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3646 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3647 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3648 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3649 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3651 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3653 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
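/* Illustrative sketch (not part of the driver): reading the first 16 bytes of
 * page 0 from a module EEPROM at the conventional 0xA0 bus address on the
 * local port (lport 0, lport-valid bit clear). All values are placeholders.
 *
 *	u8 sff_data[16];
 *	enum ice_status status;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0, 0, 0, sff_data,
 *				   sizeof(sff_data), false, NULL);
 */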
3658 * ice_aq_prog_topo_dev_nvm
3659 * @hw: pointer to the hardware structure
3660 * @topo_params: pointer to structure storing topology parameters for a device
3661 * @cd: pointer to command details structure or NULL
3663 * Program Topology Device NVM (0x06F2)
3667 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
3668 struct ice_aqc_link_topo_params *topo_params,
3669 struct ice_sq_cd *cd)
3671 struct ice_aqc_prog_topo_dev_nvm *cmd;
3672 struct ice_aq_desc desc;
3674 cmd = &desc.params.prog_topo_dev_nvm;
3676 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);
3678 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3679 ICE_NONDMA_TO_NONDMA);
3681 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3685 * ice_aq_read_topo_dev_nvm
3686 * @hw: pointer to the hardware structure
3687 * @topo_params: pointer to structure storing topology parameters for a device
3688 * @start_address: byte offset in the topology device NVM
3689 * @data: pointer to data buffer
3690 * @data_size: number of bytes to be read from the topology device NVM
3691 * @cd: pointer to command details structure or NULL
3692 * Read Topology Device NVM (0x06F3)
3696 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
3697 struct ice_aqc_link_topo_params *topo_params,
3698 u32 start_address, u8 *data, u8 data_size,
3699 struct ice_sq_cd *cd)
3701 struct ice_aqc_read_topo_dev_nvm *cmd;
3702 struct ice_aq_desc desc;
3703 enum ice_status status;
3705 if (!data || data_size == 0 ||
3706 data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
3707 return ICE_ERR_PARAM;
3709 cmd = &desc.params.read_topo_dev_nvm;
3711 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
3713 desc.datalen = data_size;
3714 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3715 ICE_NONDMA_TO_NONDMA);
3716 cmd->start_address = CPU_TO_LE32(start_address);
3718 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3722 ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
3728 * __ice_aq_get_set_rss_lut
3729 * @hw: pointer to the hardware structure
3730 * @params: RSS LUT parameters
3731 * @set: set true to set the table, false to get the table
3733 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
3735 static enum ice_status
3736 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3738 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3739 struct ice_aqc_get_set_rss_lut *cmd_resp;
3740 struct ice_aq_desc desc;
3741 enum ice_status status;
3745 return ICE_ERR_PARAM;
3747 vsi_handle = params->vsi_handle;
3750 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3751 return ICE_ERR_PARAM;
3753 lut_size = params->lut_size;
3754 lut_type = params->lut_type;
3755 glob_lut_idx = params->global_lut_id;
3756 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3758 cmd_resp = &desc.params.get_set_rss_lut;
3761 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3762 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3764 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3767 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3768 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3769 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3770 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3773 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3774 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3775 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3776 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3777 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3780 status = ICE_ERR_PARAM;
3781 goto ice_aq_get_set_rss_lut_exit;
3784 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3785 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3786 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3789 goto ice_aq_get_set_rss_lut_send;
3790 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3792 goto ice_aq_get_set_rss_lut_send;
3794 goto ice_aq_get_set_rss_lut_send;
3797 /* LUT size is only valid for Global and PF table types */
3799 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3800 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3801 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3802 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3804 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3805 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3806 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3807 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3809 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3810 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3811 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3812 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3813 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3818 status = ICE_ERR_PARAM;
3819 goto ice_aq_get_set_rss_lut_exit;
3822 ice_aq_get_set_rss_lut_send:
3823 cmd_resp->flags = CPU_TO_LE16(flags);
3824 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3826 ice_aq_get_set_rss_lut_exit:
3831 * ice_aq_get_rss_lut
3832 * @hw: pointer to the hardware structure
3833 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3835 * get the RSS lookup table, PF or VSI type
3838 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3840 return __ice_aq_get_set_rss_lut(hw, get_params, false);
3844 * ice_aq_set_rss_lut
3845 * @hw: pointer to the hardware structure
3846 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3848 * set the RSS lookup table, PF or VSI type
3851 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3853 return __ice_aq_get_set_rss_lut(hw, set_params, true);
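/* Illustrative sketch (not part of the driver): programming a 512-entry PF
 * RSS LUT for a VSI. vsi_handle and the lut[] contents are placeholders, and
 * the parameter-struct field names follow the accesses made above.
 *
 *	struct ice_aq_get_set_rss_lut_params params = { 0 };
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *	enum ice_status status;
 *
 *	// ... fill lut[] with queue indices ...
 *	params.vsi_handle = vsi_handle;
 *	params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
 *	params.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512;
 *	params.lut = lut;
 *
 *	status = ice_aq_set_rss_lut(hw, &params);
 */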
3857 * __ice_aq_get_set_rss_key
3858 * @hw: pointer to the HW struct
3859 * @vsi_id: VSI FW index
3860 * @key: pointer to key info struct
3861 * @set: set true to set the key, false to get the key
3863 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3866 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3867 struct ice_aqc_get_set_rss_keys *key,
3870 struct ice_aqc_get_set_rss_key *cmd_resp;
3871 u16 key_size = sizeof(*key);
3872 struct ice_aq_desc desc;
3874 cmd_resp = &desc.params.get_set_rss_key;
3877 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3878 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3880 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3883 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3884 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3885 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3886 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3888 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3892 * ice_aq_get_rss_key
3893 * @hw: pointer to the HW struct
3894 * @vsi_handle: software VSI handle
3895 * @key: pointer to key info struct
3897 * get the RSS key per VSI
3900 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3901 struct ice_aqc_get_set_rss_keys *key)
3903 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3904 return ICE_ERR_PARAM;
3906 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3911 * ice_aq_set_rss_key
3912 * @hw: pointer to the HW struct
3913 * @vsi_handle: software VSI handle
3914 * @keys: pointer to key info struct
3916 * set the RSS key per VSI
3919 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3920 struct ice_aqc_get_set_rss_keys *keys)
3922 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3923 return ICE_ERR_PARAM;
3925 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3930 * ice_aq_add_lan_txq
3931 * @hw: pointer to the hardware structure
3932 * @num_qgrps: Number of added queue groups
3933 * @qg_list: list of queue groups to be added
3934 * @buf_size: size of buffer for indirect command
3935 * @cd: pointer to command details structure or NULL
3937 * Add Tx LAN queue (0x0C30)
3940 * Prior to calling add Tx LAN queue:
3941 * Initialize the following as part of the Tx queue context:
3942 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3943 * Cache profile and Packet shaper profile.
3945 * After add Tx LAN queue AQ command is completed:
3946 * Interrupts should be associated with specific queues,
3947 * Association of Tx queue to Doorbell queue is not part of the Add LAN Tx queue AQ command.
3951 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3952 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3953 struct ice_sq_cd *cd)
3955 struct ice_aqc_add_tx_qgrp *list;
3956 struct ice_aqc_add_txqs *cmd;
3957 struct ice_aq_desc desc;
3958 u16 i, sum_size = 0;
3960 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3962 cmd = &desc.params.add_txqs;
3964 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3967 return ICE_ERR_PARAM;
3969 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3970 return ICE_ERR_PARAM;
3972 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3973 sum_size += ice_struct_size(list, txqs, list->num_txqs);
3974 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3978 if (buf_size != sum_size)
3979 return ICE_ERR_PARAM;
3981 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3983 cmd->num_qgrps = num_qgrps;
3985 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
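/* Sizing note (illustrative, not from the original source): qg_list is a
 * chain of flexible-array groups, so buf_size must equal the sum of
 * ice_struct_size(grp, txqs, grp->num_txqs) over all groups, exactly as the
 * validation loop above computes. For the common single-group, single-queue
 * case this reduces to:
 *
 *	u16 buf_size = ice_struct_size(qg_buf, txqs, 1);
 *
 * where 'qg_buf' points to a struct ice_aqc_add_tx_qgrp with num_txqs == 1.
 */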
3989 * ice_aq_dis_lan_txq
3990 * @hw: pointer to the hardware structure
3991 * @num_qgrps: number of groups in the list
3992 * @qg_list: the list of groups to disable
3993 * @buf_size: the total size of the qg_list buffer in bytes
3994 * @rst_src: if called due to reset, specifies the reset source
3995 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3996 * @cd: pointer to command details structure or NULL
3998 * Disable LAN Tx queue (0x0C31)
4000 static enum ice_status
4001 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4002 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
4003 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4004 struct ice_sq_cd *cd)
4006 struct ice_aqc_dis_txq_item *item;
4007 struct ice_aqc_dis_txqs *cmd;
4008 struct ice_aq_desc desc;
4009 enum ice_status status;
4012 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4013 cmd = &desc.params.dis_txqs;
4014 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4016 /* qg_list can be NULL only in VM/VF reset flow */
4017 if (!qg_list && !rst_src)
4018 return ICE_ERR_PARAM;
4020 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4021 return ICE_ERR_PARAM;
4023 cmd->num_entries = num_qgrps;
4025 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
4026 ICE_AQC_Q_DIS_TIMEOUT_M);
4030 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4031 cmd->vmvf_and_timeout |=
4032 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
4039 /* flush pipe on time out */
4040 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4041 /* If no queue group info, we are in a reset flow. Issue the AQ */
4045 /* set RD bit to indicate that command buffer is provided by the driver
4046 * and it needs to be read by the firmware
4048 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4050 for (i = 0, item = qg_list; i < num_qgrps; i++) {
4051 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
4053 /* If the num of queues is even, add 2 bytes of padding */
4054 if ((item->num_qs % 2) == 0)
4059 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
4063 return ICE_ERR_PARAM;
4066 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4069 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4070 vmvf_num, hw->adminq.sq_last_status);
4072 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4073 LE16_TO_CPU(qg_list[0].q_id[0]),
4074 hw->adminq.sq_last_status);
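/* Layout note (illustrative, not from the original source): each
 * ice_aqc_dis_txq_item in qg_list is variable sized; entries holding an even
 * number of queue IDs carry two extra padding bytes, as computed in the
 * validation loop above. A caller disabling a single queue therefore sizes
 * the buffer as:
 *
 *	u16 buf_size = ice_struct_size(qg_list, q_id, 1);
 */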
4080 * ice_aq_move_recfg_lan_txq
4081 * @hw: pointer to the hardware structure
4082 * @num_qs: number of queues to move/reconfigure
4083 * @is_move: true if this operation involves node movement
4084 * @is_tc_change: true if this operation involves a TC change
4085 * @subseq_call: true if this operation is a subsequent call
4086 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
4087 * @timeout: timeout in units of 100 usec (valid values 0-50)
4088 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
4089 * @buf: struct containing src/dest TEID and per-queue info
4090 * @buf_size: size of buffer for indirect command
4091 * @txqs_moved: out param, number of queues successfully moved
4092 * @cd: pointer to command details structure or NULL
4094 * Move / Reconfigure Tx LAN queues (0x0C32)
4097 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
4098 bool is_tc_change, bool subseq_call, bool flush_pipe,
4099 u8 timeout, u32 *blocked_cgds,
4100 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
4101 u8 *txqs_moved, struct ice_sq_cd *cd)
4103 struct ice_aqc_move_txqs *cmd;
4104 struct ice_aq_desc desc;
4105 enum ice_status status;
4107 cmd = &desc.params.move_txqs;
4108 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
4110 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
4111 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
4112 return ICE_ERR_PARAM;
4114 if (is_tc_change && !flush_pipe && !blocked_cgds)
4115 return ICE_ERR_PARAM;
4117 if (!is_move && !is_tc_change)
4118 return ICE_ERR_PARAM;
4120 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4123 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
4126 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
4129 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
4132 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
4134 cmd->num_qs = num_qs;
4135 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
4136 ICE_AQC_Q_CMD_TIMEOUT_M);
4138 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4140 if (!status && txqs_moved)
4141 *txqs_moved = cmd->num_qs;
4143 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
4144 is_tc_change && !flush_pipe)
4145 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
4150 /* End of FW Admin Queue command wrappers */
4153 * ice_write_byte - write a byte to a packed context structure
4154 * @src_ctx: the context structure to read from
4155 * @dest_ctx: the context to be written to
4156 * @ce_info: a description of the struct to be filled
4159 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4161 u8 src_byte, dest_byte, mask;
4165 /* copy from the next struct field */
4166 from = src_ctx + ce_info->offset;
4168 /* prepare the bits and mask */
4169 shift_width = ce_info->lsb % 8;
4170 mask = (u8)(BIT(ce_info->width) - 1);
4175 /* shift to correct alignment */
4176 mask <<= shift_width;
4177 src_byte <<= shift_width;
4179 /* get the current bits from the target bit string */
4180 dest = dest_ctx + (ce_info->lsb / 8);
4182 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4184 dest_byte &= ~mask; /* get the bits not changing */
4185 dest_byte |= src_byte; /* add in the new bits */
4187 /* put it all back */
4188 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
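/* Worked example (illustrative, not from the original source): for a field
 * with ce_info->width = 3 and ce_info->lsb = 11, ice_write_byte() computes
 * shift_width = 11 % 8 = 3 and mask = (1 << 3) - 1 = 0x07, shifts both left
 * by 3 (mask becomes 0x38), and merges the field into bits 3..5 of the
 * destination byte at offset 11 / 8 = 1.
 */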
4192 * ice_write_word - write a word to a packed context structure
4193 * @src_ctx: the context structure to read from
4194 * @dest_ctx: the context to be written to
4195 * @ce_info: a description of the struct to be filled
4198 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4205 /* copy from the next struct field */
4206 from = src_ctx + ce_info->offset;
4208 /* prepare the bits and mask */
4209 shift_width = ce_info->lsb % 8;
4210 mask = BIT(ce_info->width) - 1;
4212 /* don't swizzle the bits until after the mask because the mask bits
4213 * will be in a different bit position on big endian machines
4215 src_word = *(u16 *)from;
4218 /* shift to correct alignment */
4219 mask <<= shift_width;
4220 src_word <<= shift_width;
4222 /* get the current bits from the target bit string */
4223 dest = dest_ctx + (ce_info->lsb / 8);
4225 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
4227 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
4228 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
4230 /* put it all back */
4231 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4235 * ice_write_dword - write a dword to a packed context structure
4236 * @src_ctx: the context structure to read from
4237 * @dest_ctx: the context to be written to
4238 * @ce_info: a description of the struct to be filled
4241 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4243 u32 src_dword, mask;
4248 /* copy from the next struct field */
4249 from = src_ctx + ce_info->offset;
4251 /* prepare the bits and mask */
4252 shift_width = ce_info->lsb % 8;
4254 /* if the field width is exactly 32 on an x86 machine, then the shift
4255 * operation will not work because the SHL instruction's count is masked
4256 * to 5 bits, so the shift will do nothing
4258 if (ce_info->width < 32)
4259 mask = BIT(ce_info->width) - 1;
4263 /* don't swizzle the bits until after the mask because the mask bits
4264 * will be in a different bit position on big endian machines
4266 src_dword = *(u32 *)from;
4269 /* shift to correct alignment */
4270 mask <<= shift_width;
4271 src_dword <<= shift_width;
4273 /* get the current bits from the target bit string */
4274 dest = dest_ctx + (ce_info->lsb / 8);
4276 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
4278 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
4279 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
4281 /* put it all back */
4282 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4286 * ice_write_qword - write a qword to a packed context structure
4287 * @src_ctx: the context structure to read from
4288 * @dest_ctx: the context to be written to
4289 * @ce_info: a description of the struct to be filled
4292 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4294 u64 src_qword, mask;
4299 /* copy from the next struct field */
4300 from = src_ctx + ce_info->offset;
4302 /* prepare the bits and mask */
4303 shift_width = ce_info->lsb % 8;
4305 /* if the field width is exactly 64 on an x86 machine, then the shift
4306 * operation will not work because the SHL instruction's count is masked
4307 * to 6 bits, so the shift will do nothing
4309 if (ce_info->width < 64)
4310 mask = BIT_ULL(ce_info->width) - 1;
4314 /* don't swizzle the bits until after the mask because the mask bits
4315 * will be in a different bit position on big endian machines
4317 src_qword = *(u64 *)from;
4320 /* shift to correct alignment */
4321 mask <<= shift_width;
4322 src_qword <<= shift_width;
4324 /* get the current bits from the target bit string */
4325 dest = dest_ctx + (ce_info->lsb / 8);
4327 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4329 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
4330 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
4332 /* put it all back */
4333 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4337 * ice_set_ctx - set context bits in packed structure
4338 * @hw: pointer to the hardware structure
4339 * @src_ctx: pointer to a generic non-packed context structure
4340 * @dest_ctx: pointer to memory for the packed structure
4341 * @ce_info: a description of the structure to be transformed
4344 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4345 const struct ice_ctx_ele *ce_info)
4349 for (f = 0; ce_info[f].width; f++) {
4350 /* We have to deal with each element of the FW response
4351 * using the correct size so that we are correct regardless
4352 * of the endianness of the machine.
4354 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4355 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4356 f, ce_info[f].width, ce_info[f].size_of);
4359 switch (ce_info[f].size_of) {
4361 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4364 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4367 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4370 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4373 return ICE_ERR_INVAL_SIZE;
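/* Usage sketch (illustrative, not from the original source): ice_set_ctx()
 * is driven by a ce_info[] table terminated by a zero-width entry. Packing a
 * Tx LAN queue context typically looks like the call below; the table name
 * ice_tlan_ctx_info is assumed to be defined elsewhere in this file:
 *
 *	status = ice_set_ctx(hw, (u8 *)&tlan_ctx, ctx_buf, ice_tlan_ctx_info);
 */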
4381 * ice_read_byte - read context byte into struct
4382 * @src_ctx: the context structure to read from
4383 * @dest_ctx: the context to be written to
4384 * @ce_info: a description of the struct to be filled
4387 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4393 /* prepare the bits and mask */
4394 shift_width = ce_info->lsb % 8;
4395 mask = (u8)(BIT(ce_info->width) - 1);
4397 /* shift to correct alignment */
4398 mask <<= shift_width;
4400 /* get the current bits from the src bit string */
4401 src = src_ctx + (ce_info->lsb / 8);
4403 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4405 dest_byte &= ~(mask);
4407 dest_byte >>= shift_width;
4409 /* get the address from the struct field */
4410 target = dest_ctx + ce_info->offset;
4412 /* put it back in the struct */
4413 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4417 * ice_read_word - read context word into struct
4418 * @src_ctx: the context structure to read from
4419 * @dest_ctx: the context to be written to
4420 * @ce_info: a description of the struct to be filled
4423 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4425 u16 dest_word, mask;
4430 /* prepare the bits and mask */
4431 shift_width = ce_info->lsb % 8;
4432 mask = BIT(ce_info->width) - 1;
4434 /* shift to correct alignment */
4435 mask <<= shift_width;
4437 /* get the current bits from the src bit string */
4438 src = src_ctx + (ce_info->lsb / 8);
4440 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4442 /* the data in the memory is stored as little endian so mask it
4445 src_word &= ~(CPU_TO_LE16(mask));
4447 /* get the data back into host order before shifting */
4448 dest_word = LE16_TO_CPU(src_word);
4450 dest_word >>= shift_width;
4452 /* get the address from the struct field */
4453 target = dest_ctx + ce_info->offset;
4455 /* put it back in the struct */
4456 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4460 * ice_read_dword - read context dword into struct
4461 * @src_ctx: the context structure to read from
4462 * @dest_ctx: the context to be written to
4463 * @ce_info: a description of the struct to be filled
4466 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4468 u32 dest_dword, mask;
4473 /* prepare the bits and mask */
4474 shift_width = ce_info->lsb % 8;
4476 /* if the field width is exactly 32 on an x86 machine, then the shift
4477 * operation will not work because the SHL instruction's count is masked
4478 * to 5 bits, so the shift will do nothing
4480 if (ce_info->width < 32)
4481 mask = BIT(ce_info->width) - 1;
4485 /* shift to correct alignment */
4486 mask <<= shift_width;
4488 /* get the current bits from the src bit string */
4489 src = src_ctx + (ce_info->lsb / 8);
4491 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4493 /* the data in the memory is stored as little endian so mask it
4496 src_dword &= ~(CPU_TO_LE32(mask));
4498 /* get the data back into host order before shifting */
4499 dest_dword = LE32_TO_CPU(src_dword);
4501 dest_dword >>= shift_width;
4503 /* get the address from the struct field */
4504 target = dest_ctx + ce_info->offset;
4506 /* put it back in the struct */
4507 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4511 * ice_read_qword - read context qword into struct
4512 * @src_ctx: the context structure to read from
4513 * @dest_ctx: the context to be written to
4514 * @ce_info: a description of the struct to be filled
4517 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4519 u64 dest_qword, mask;
4524 /* prepare the bits and mask */
4525 shift_width = ce_info->lsb % 8;
4527 /* if the field width is exactly 64 on an x86 machine, then the shift
4528 * operation will not work because the SHL instruction's count is masked
4529 * to 6 bits, so the shift will do nothing
4531 if (ce_info->width < 64)
4532 mask = BIT_ULL(ce_info->width) - 1;
4536 /* shift to correct alignment */
4537 mask <<= shift_width;
4539 /* get the current bits from the src bit string */
4540 src = src_ctx + (ce_info->lsb / 8);
4542 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4544 /* the data in the memory is stored as little endian so mask it
4547 src_qword &= ~(CPU_TO_LE64(mask));
4549 /* get the data back into host order before shifting */
4550 dest_qword = LE64_TO_CPU(src_qword);
4552 dest_qword >>= shift_width;
4554 /* get the address from the struct field */
4555 target = dest_ctx + ce_info->offset;
4557 /* put it back in the struct */
4558 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4562 * ice_get_ctx - extract context bits from a packed structure
4563 * @src_ctx: pointer to a generic packed context structure
4564 * @dest_ctx: pointer to a generic non-packed context structure
4565 * @ce_info: a description of the structure to be read from
4568 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4572 for (f = 0; ce_info[f].width; f++) {
4573 switch (ce_info[f].size_of) {
4575 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
4578 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
4581 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
4584 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
4587 /* nothing to do, just keep going */
4596 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4597 * @hw: pointer to the HW struct
4598 * @vsi_handle: software VSI handle
4600 * @q_handle: software queue handle
4603 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4605 struct ice_vsi_ctx *vsi;
4606 struct ice_q_ctx *q_ctx;
4608 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4611 if (q_handle >= vsi->num_lan_q_entries[tc])
4613 if (!vsi->lan_q_ctx[tc])
4615 q_ctx = vsi->lan_q_ctx[tc];
4616 return &q_ctx[q_handle];
4621 * @pi: port information structure
4622 * @vsi_handle: software VSI handle
4624 * @q_handle: software queue handle
4625 * @num_qgrps: Number of added queue groups
4626 * @buf: list of queue groups to be added
4627 * @buf_size: size of buffer for indirect command
4628 * @cd: pointer to command details structure or NULL
4630 * This function adds one LAN queue
4633 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4634 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4635 struct ice_sq_cd *cd)
4637 struct ice_aqc_txsched_elem_data node = { 0 };
4638 struct ice_sched_node *parent;
4639 struct ice_q_ctx *q_ctx;
4640 enum ice_status status;
4643 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4646 if (num_qgrps > 1 || buf->num_txqs > 1)
4647 return ICE_ERR_MAX_LIMIT;
4651 if (!ice_is_vsi_valid(hw, vsi_handle))
4652 return ICE_ERR_PARAM;
4654 ice_acquire_lock(&pi->sched_lock);
4656 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4658 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4660 status = ICE_ERR_PARAM;
4664 /* find a parent node */
4665 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4666 ICE_SCHED_NODE_OWNER_LAN);
4668 status = ICE_ERR_PARAM;
4672 buf->parent_teid = parent->info.node_teid;
4673 node.parent_teid = parent->info.node_teid;
4674 /* Mark the values in the "generic" section as valid. The default
4675 * value in the "generic" section is zero. This means that:
4676 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4677 * - Priority 0 among siblings, indicated by Bits 1-3.
4678 * - WFQ, indicated by Bit 4.
4679 * - Adjustment value 0 is used in the PSM credit update flow, indicated by Bits 5-6.
4681 * - Bit 7 is reserved.
4682 * Without setting the generic section as valid in valid_sections, the
4683 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4685 buf->txqs[0].info.valid_sections =
4686 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4687 ICE_AQC_ELEM_VALID_EIR;
4688 buf->txqs[0].info.generic = 0;
4689 buf->txqs[0].info.cir_bw.bw_profile_idx =
4690 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4691 buf->txqs[0].info.cir_bw.bw_alloc =
4692 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4693 buf->txqs[0].info.eir_bw.bw_profile_idx =
4694 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4695 buf->txqs[0].info.eir_bw.bw_alloc =
4696 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4698 /* add the LAN queue */
4699 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4700 if (status != ICE_SUCCESS) {
4701 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4702 LE16_TO_CPU(buf->txqs[0].txq_id),
4703 hw->adminq.sq_last_status);
4707 node.node_teid = buf->txqs[0].q_teid;
4708 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4709 q_ctx->q_handle = q_handle;
4710 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4712 /* add a leaf node into scheduler tree queue layer */
4713 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4715 status = ice_sched_replay_q_bw(pi, q_ctx);
4718 ice_release_lock(&pi->sched_lock);
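/* Usage sketch (illustrative, not from the original source): callers enable
 * one queue at a time, so the add buffer describes a single group with a
 * single queue. 'q_id' stands for the absolute Tx queue index and the queue
 * context in txqs[0].txq_ctx is assumed to be packed separately (e.g. via
 * ice_set_ctx()):
 *
 *	struct ice_aqc_add_tx_qgrp *qg_buf;
 *	u16 size = ice_struct_size(qg_buf, txqs, 1);
 *
 *	qg_buf = (struct ice_aqc_add_tx_qgrp *)ice_malloc(hw, size);
 *	if (!qg_buf)
 *		return ICE_ERR_NO_MEMORY;
 *	qg_buf->num_txqs = 1;
 *	qg_buf->txqs[0].txq_id = CPU_TO_LE16(q_id);
 *	status = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg_buf,
 *				 size, NULL);
 */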
4724 * @pi: port information structure
4725 * @vsi_handle: software VSI handle
4727 * @num_queues: number of queues
4728 * @q_handles: pointer to software queue handle array
4729 * @q_ids: pointer to the q_id array
4730 * @q_teids: pointer to queue node teids
4731 * @rst_src: if called due to reset, specifies the reset source
4732 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4733 * @cd: pointer to command details structure or NULL
4735 * This function removes queues and their corresponding nodes in SW DB
4738 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4739 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4740 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4741 struct ice_sq_cd *cd)
4743 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4744 struct ice_aqc_dis_txq_item *qg_list;
4745 struct ice_q_ctx *q_ctx;
4749 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4755 /* if the queues are already disabled but the disable queue command
4756 * still has to be sent to complete the VF reset, then call
4757 * ice_aq_dis_lan_txq without any queue information
4760 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4765 buf_size = ice_struct_size(qg_list, q_id, 1);
4766 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
4768 return ICE_ERR_NO_MEMORY;
4770 ice_acquire_lock(&pi->sched_lock);
4772 for (i = 0; i < num_queues; i++) {
4773 struct ice_sched_node *node;
4775 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4778 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4780 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4784 if (q_ctx->q_handle != q_handles[i]) {
4785 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4786 q_ctx->q_handle, q_handles[i]);
4789 qg_list->parent_teid = node->info.parent_teid;
4790 qg_list->num_qs = 1;
4791 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
4792 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4795 if (status != ICE_SUCCESS)
4797 ice_free_sched_node(pi, node);
4798 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4800 ice_release_lock(&pi->sched_lock);
4801 ice_free(hw, qg_list);
4806 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4807 * @pi: port information structure
4808 * @vsi_handle: software VSI handle
4809 * @tc_bitmap: TC bitmap
4810 * @maxqs: max queues array per TC
4811 * @owner: LAN or RDMA
4813 * This function adds/updates the VSI queues per TC.
4815 static enum ice_status
4816 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4817 u16 *maxqs, u8 owner)
4819 enum ice_status status = ICE_SUCCESS;
4822 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4825 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4826 return ICE_ERR_PARAM;
4828 ice_acquire_lock(&pi->sched_lock);
4830 ice_for_each_traffic_class(i) {
4831 /* configuration is possible only if TC node is present */
4832 if (!ice_sched_get_tc_node(pi, i))
4835 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4836 ice_is_tc_ena(tc_bitmap, i));
4841 ice_release_lock(&pi->sched_lock);
4846 * ice_cfg_vsi_lan - configure VSI LAN queues
4847 * @pi: port information structure
4848 * @vsi_handle: software VSI handle
4849 * @tc_bitmap: TC bitmap
4850 * @max_lanqs: max LAN queues array per TC
4852 * This function adds/updates the VSI LAN queues per TC.
4855 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4858 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4859 ICE_SCHED_NODE_OWNER_LAN);
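/* Usage sketch (illustrative, not from the original source): the caller
 * supplies one maximum queue count per TC, with tc_bitmap selecting the TCs
 * to configure. ICE_MAX_TRAFFIC_CLASS and 'num_txq' are assumed here:
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *
 *	max_lanqs[0] = num_txq;
 *	status = ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
 */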
4863 * ice_is_main_vsi - checks whether the VSI is main VSI
4864 * @hw: pointer to the HW struct
4865 * @vsi_handle: VSI handle
4867 * Checks whether the VSI is the main VSI (the first PF VSI created on this port).
4870 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
4872 return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
4876 * ice_replay_pre_init - replay pre initialization
4877 * @hw: pointer to the HW struct
4878 * @sw: pointer to switch info struct for which function initializes filters
4880 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4882 static enum ice_status
4883 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
4885 enum ice_status status;
4888 /* Delete old entries from replay filter list head if there is any */
4889 ice_rm_sw_replay_rule_info(hw, sw);
4890 /* At the start of replay, move entries into the replay_rules list; this
4891 * allows rule entries to be added back to the filt_rules list,
4892 * which is the operational list.
4894 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
4895 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
4896 &sw->recp_list[i].filt_replay_rules);
4897 ice_sched_replay_agg_vsi_preinit(hw);
4899 status = ice_sched_replay_root_node_bw(hw->port_info);
4903 return ice_sched_replay_tc_node_bw(hw->port_info);
4907 * ice_replay_vsi - replay VSI configuration
4908 * @hw: pointer to the HW struct
4909 * @vsi_handle: driver VSI handle
4911 * Restore all VSI configuration after reset. This function must be called
4912 * with the main VSI first.
4914 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4916 struct ice_switch_info *sw = hw->switch_info;
4917 struct ice_port_info *pi = hw->port_info;
4918 enum ice_status status;
4920 if (!ice_is_vsi_valid(hw, vsi_handle))
4921 return ICE_ERR_PARAM;
4923 /* Replay pre-initialization if there is any */
4924 if (ice_is_main_vsi(hw, vsi_handle)) {
4925 status = ice_replay_pre_init(hw, sw);
4929 /* Replay per VSI all RSS configurations */
4930 status = ice_replay_rss_cfg(hw, vsi_handle);
4933 /* Replay per VSI all filters */
4934 status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
4936 status = ice_replay_vsi_agg(hw, vsi_handle);
4941 * ice_replay_post - post replay configuration cleanup
4942 * @hw: pointer to the HW struct
4944 * Post replay cleanup.
4946 void ice_replay_post(struct ice_hw *hw)
4948 /* Delete old entries from replay filter list head */
4949 ice_rm_all_sw_replay_rule_info(hw);
4950 ice_sched_replay_agg(hw);
4954 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4955 * @hw: ptr to the hardware info
4956 * @reg: offset of 64 bit HW register to read from
4957 * @prev_stat_loaded: bool to specify if previous stats are loaded
4958 * @prev_stat: ptr to previous loaded stat value
4959 * @cur_stat: ptr to current stat value
4962 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4963 u64 *prev_stat, u64 *cur_stat)
4965 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4967 /* device stats are not reset at PFR, so they likely will not be zeroed
4968 * when the driver starts. Thus, save the value from the first read
4969 * without adding to the statistic value so that we report stats which
4970 * count up from zero.
4972 if (!prev_stat_loaded) {
4973 *prev_stat = new_data;
4977 /* Calculate the difference between the new and old values, and then
4978 * add it to the software stat value.
4980 if (new_data >= *prev_stat)
4981 *cur_stat += new_data - *prev_stat;
4983 /* to manage the potential roll-over */
4984 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4986 /* Update the previously stored value to prepare for next read */
4987 *prev_stat = new_data;
4991 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4992 * @hw: ptr to the hardware info
4993 * @reg: offset of HW register to read from
4994 * @prev_stat_loaded: bool to specify if previous stats are loaded
4995 * @prev_stat: ptr to previous loaded stat value
4996 * @cur_stat: ptr to current stat value
4999 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5000 u64 *prev_stat, u64 *cur_stat)
5004 new_data = rd32(hw, reg);
5006 /* device stats are not reset at PFR, so they likely will not be zeroed
5007 * when the driver starts. Thus, save the value from the first read
5008 * without adding to the statistic value so that we report stats which
5009 * count up from zero.
5011 if (!prev_stat_loaded) {
5012 *prev_stat = new_data;
5016 /* Calculate the difference between the new and old values, and then
5017 * add it to the software stat value.
5019 if (new_data >= *prev_stat)
5020 *cur_stat += new_data - *prev_stat;
5022 /* to manage the potential roll-over */
5023 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5025 /* Update the previously stored value to prepare for next read */
5026 *prev_stat = new_data;
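/* Worked example (illustrative, not from the original source): with
 * *prev_stat = 0xFFFFFFF0 and a new 32-bit reading of 0x00000010 the counter
 * has rolled over, so the code above adds
 * (0x00000010 + BIT_ULL(32)) - 0xFFFFFFF0 = 0x20 to *cur_stat, i.e. the 32
 * events counted across the wrap.
 */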
5030 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
5031 * @hw: ptr to the hardware info
5032 * @vsi_handle: VSI handle
5033 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
5034 * @cur_stats: ptr to current stats structure
5036 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
5037 * thus cannot be read using the normal ice_stat_update32 function.
5039 * Read the GLV_REPC register associated with the given VSI, and update the
5040 * rx_no_desc and rx_error values in the ice_eth_stats structure.
5042 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
5043 * cleared each time it's read.
5045 * Note that the GLV_RDPC register also counts the causes that would trigger
5046 * GLV_REPC. However, it does not give the finer grained detail about why the
5047 * packets are being dropped. The GLV_REPC values can be used to distinguish
5048 * whether Rx packets are dropped due to errors or due to no available descriptors.
5052 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
5053 struct ice_eth_stats *cur_stats)
5055 u16 vsi_num, no_desc, error_cnt;
5058 if (!ice_is_vsi_valid(hw, vsi_handle))
5061 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
5063 /* If we haven't loaded stats yet, just clear the current value */
5064 if (!prev_stat_loaded) {
5065 wr32(hw, GLV_REPC(vsi_num), 0);
5069 repc = rd32(hw, GLV_REPC(vsi_num));
5070 no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
5071 error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
5073 /* Clear the count by writing to the stats register */
5074 wr32(hw, GLV_REPC(vsi_num), 0);
5076 cur_stats->rx_no_desc += no_desc;
5077 cur_stats->rx_errors += error_cnt;
5081 * ice_sched_query_elem - query element information from HW
5082 * @hw: pointer to the HW struct
5083 * @node_teid: node TEID to be queried
5084 * @buf: buffer to hold the element information
5086 * This function queries HW element information
5089 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
5090 struct ice_aqc_txsched_elem_data *buf)
5092 u16 buf_size, num_elem_ret = 0;
5093 enum ice_status status;
5095 buf_size = sizeof(*buf);
5096 ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
5097 buf->node_teid = CPU_TO_LE32(node_teid);
5098 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
5100 if (status != ICE_SUCCESS || num_elem_ret != 1)
5101 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
5106 * ice_get_fw_mode - returns FW mode
5107 * @hw: pointer to the HW struct
5109 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
5111 #define ICE_FW_MODE_DBG_M BIT(0)
5112 #define ICE_FW_MODE_REC_M BIT(1)
5113 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
5116 /* check the current FW mode */
5117 fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
5119 if (fw_mode & ICE_FW_MODE_DBG_M)
5120 return ICE_FW_MODE_DBG;
5121 else if (fw_mode & ICE_FW_MODE_REC_M)
5122 return ICE_FW_MODE_REC;
5123 else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
5124 return ICE_FW_MODE_ROLLBACK;
5126 return ICE_FW_MODE_NORMAL;
5131 * @hw: pointer to the hw struct
5132 * @topo_addr: topology address for a device to communicate with
5133 * @bus_addr: 7-bit I2C bus address
5134 * @addr: I2C memory address (I2C offset) with up to 16 bits
5135 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
5136 * bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
5137 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
5138 * @cd: pointer to command details structure or NULL
5143 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5144 u16 bus_addr, __le16 addr, u8 params, u8 *data,
5145 struct ice_sq_cd *cd)
5147 struct ice_aq_desc desc = { 0 };
5148 struct ice_aqc_i2c *cmd;
5149 enum ice_status status;
5152 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
5153 cmd = &desc.params.read_write_i2c;
5156 return ICE_ERR_PARAM;
5158 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
5160 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
5161 cmd->topo_addr = topo_addr;
5162 cmd->i2c_params = params;
5163 cmd->i2c_addr = addr;
5165 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5167 struct ice_aqc_read_i2c_resp *resp;
5170 resp = &desc.params.read_i2c_resp;
5171 for (i = 0; i < data_size; i++) {
5172 *data = resp->i2c_data[i];
5182 * @hw: pointer to the hw struct
5183 * @topo_addr: topology address for a device to communicate with
5184 * @bus_addr: 7-bit I2C bus address
5185 * @addr: I2C memory address (I2C offset) with up to 16 bits
5186 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
5187 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
5188 * @cd: pointer to command details structure or NULL
5190 * Write I2C (0x06E3)
5193 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5194 u16 bus_addr, __le16 addr, u8 params, u8 *data,
5195 struct ice_sq_cd *cd)
5197 struct ice_aq_desc desc = { 0 };
5198 struct ice_aqc_i2c *cmd;
5201 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
5202 cmd = &desc.params.read_write_i2c;
5204 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
5206 /* data_size limited to 4 */
5208 return ICE_ERR_PARAM;
5210 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
5211 cmd->topo_addr = topo_addr;
5212 cmd->i2c_params = params;
5213 cmd->i2c_addr = addr;
5215 for (i = 0; i < data_size; i++) {
5216 cmd->i2c_data[i] = *data;
5220 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
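/* Encoding sketch (illustrative, not from the original source): the transfer
 * length lives in bits [3:0] of 'params', so a caller writing 'len' bytes
 * typically builds the field as:
 *
 *	u8 params = (len << ICE_AQC_I2C_DATA_SIZE_S) & ICE_AQC_I2C_DATA_SIZE_M;
 *
 * and ORs in any additional flag bits (repeated start, address type) as
 * needed.
 */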
5225 * @hw: pointer to the hw struct
5226 * @gpio_ctrl_handle: GPIO controller node handle
5227 * @pin_idx: IO Number of the GPIO that needs to be set
5228 * @value: SW-provided IO value to set in the LSB
5229 * @cd: pointer to command details structure or NULL
5231 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
5234 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
5235 struct ice_sq_cd *cd)
5237 struct ice_aqc_gpio *cmd;
5238 struct ice_aq_desc desc;
5240 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
5241 cmd = &desc.params.read_write_gpio;
5242 cmd->gpio_ctrl_handle = gpio_ctrl_handle;
5243 cmd->gpio_num = pin_idx;
5244 cmd->gpio_val = value ? 1 : 0;
5246 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5251 * @hw: pointer to the hw struct
5252 * @gpio_ctrl_handle: GPIO controller node handle
5253 * @pin_idx: IO Number of the GPIO that needs to be read
5254 * @value: IO value read
5255 * @cd: pointer to command details structure or NULL
5257 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of the topology
5261 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
5262 bool *value, struct ice_sq_cd *cd)
5264 struct ice_aqc_gpio *cmd;
5265 struct ice_aq_desc desc;
5266 enum ice_status status;
5268 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
5269 cmd = &desc.params.read_write_gpio;
5270 cmd->gpio_ctrl_handle = gpio_ctrl_handle;
5271 cmd->gpio_num = pin_idx;
5273 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5277 *value = !!cmd->gpio_val;
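/* Usage sketch (illustrative, not from the original source): driving a GPIO
 * pin and reading it back; 'handle' and 'pin' are assumed to come from the
 * link topology:
 *
 *	bool val;
 *	enum ice_status status;
 *
 *	status = ice_aq_set_gpio(hw, handle, pin, true, NULL);
 *	if (!status)
 *		status = ice_aq_get_gpio(hw, handle, pin, &val, NULL);
 */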
5282 * ice_fw_supports_link_override
5283 * @hw: pointer to the hardware structure
5285 * Checks if the firmware supports link override
5287 bool ice_fw_supports_link_override(struct ice_hw *hw)
5289 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
5290 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
5292 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
5293 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
5295 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
5303 * ice_get_link_default_override
5304 * @ldo: pointer to the link default override struct
5305 * @pi: pointer to the port info struct
5307 * Gets the link default override for a port
5310 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
5311 struct ice_port_info *pi)
5313 u16 i, tlv, tlv_len, tlv_start, buf, offset;
5314 struct ice_hw *hw = pi->hw;
5315 enum ice_status status;
5317 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
5318 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
5320 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
5324 /* Each port has its own config; calculate for our port */
5325 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
5326 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
5328 /* link options first */
5329 status = ice_read_sr_word(hw, tlv_start, &buf);
5331 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5334 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
5335 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
5336 ICE_LINK_OVERRIDE_PHY_CFG_S;
5338 /* link PHY config */
5339 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
5340 status = ice_read_sr_word(hw, offset, &buf);
5342 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
5345 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
5348 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
5349 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5350 status = ice_read_sr_word(hw, (offset + i), &buf);
5352 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5355 /* shift 16 bits at a time to fill 64 bits */
5356 ldo->phy_type_low |= ((u64)buf << (i * 16));
5359 /* PHY types high */
5360 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
5361 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
5362 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5363 status = ice_read_sr_word(hw, (offset + i), &buf);
5365 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5368 /* shift 16 bits at a time to fill 64 bits */
5369 ldo->phy_type_high |= ((u64)buf << (i * 16));
5376 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
5377 * @caps: get PHY capability data
5379 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
5381 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
5382 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
5383 ICE_AQC_PHY_AN_EN_CLAUSE73 |
5384 ICE_AQC_PHY_AN_EN_CLAUSE37))
5391 * ice_aq_set_lldp_mib - Set the LLDP MIB
5392 * @hw: pointer to the HW struct
5393 * @mib_type: Local, Remote or both Local and Remote MIBs
5394 * @buf: pointer to the caller-supplied buffer to store the MIB block
5395 * @buf_size: size of the buffer (in bytes)
5396 * @cd: pointer to command details structure or NULL
5398 * Set the LLDP MIB. (0x0A08)
5401 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
5402 struct ice_sq_cd *cd)
5404 struct ice_aqc_lldp_set_local_mib *cmd;
5405 struct ice_aq_desc desc;
5407 cmd = &desc.params.lldp_set_mib;
5409 if (buf_size == 0 || !buf)
5410 return ICE_ERR_PARAM;
5412 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
5414 desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
5415 desc.datalen = CPU_TO_LE16(buf_size);
5417 cmd->type = mib_type;
5418 cmd->length = CPU_TO_LE16(buf_size);
5420 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5424 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
5425 * @hw: pointer to HW struct
5427 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
5429 if (hw->mac_type != ICE_MAC_E810)
5432 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
5433 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
5435 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
5436 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
5438 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
5445 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
5446 * @hw: pointer to HW struct
5447 * @vsi_num: absolute HW index for VSI
5448 * @add: boolean for if adding or removing a filter
5451 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
5453 struct ice_aqc_lldp_filter_ctrl *cmd;
5454 struct ice_aq_desc desc;
5456 cmd = &desc.params.lldp_filter_ctrl;
5458 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
5461 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
5463 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
5465 cmd->vsi_num = CPU_TO_LE16(vsi_num);
5467 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5471 * ice_fw_supports_report_dflt_cfg
5472 * @hw: pointer to the hardware structure
5474 * Checks if the firmware supports report default configuration
5476 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
5478 if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
5479 if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
5481 if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
5482 hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
5484 } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {