1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_common.h"
7 #include "ice_adminq_cmd.h"
10 #include "ice_switch.h"
12 #define ICE_PF_RESET_WAIT_COUNT 300
15 * dump_phy_type - helper function that prints PHY type strings
16 * @hw: pointer to the HW structure
17 * @phy: 64 bit PHY type to decipher
18 * @i: bit index within phy
19 * @phy_string: string corresponding to bit i in phy
20 * @prefix: prefix string to differentiate multiple dumps
23 dump_phy_type(struct ice_hw *hw, u64 phy, u8 i, const char *phy_string,
27 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", prefix, i,
32 * ice_dump_phy_type_low - helper function to dump phy_type_low
33 * @hw: pointer to the HW structure
34 * @low: 64 bit value for phy_type_low
35 * @prefix: prefix string to differentiate multiple dumps
38 ice_dump_phy_type_low(struct ice_hw *hw, u64 low, const char *prefix)
40 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
41 (unsigned long long)low);
43 dump_phy_type(hw, low, 0, "100BASE_TX", prefix);
44 dump_phy_type(hw, low, 1, "100M_SGMII", prefix);
45 dump_phy_type(hw, low, 2, "1000BASE_T", prefix);
46 dump_phy_type(hw, low, 3, "1000BASE_SX", prefix);
47 dump_phy_type(hw, low, 4, "1000BASE_LX", prefix);
48 dump_phy_type(hw, low, 5, "1000BASE_KX", prefix);
49 dump_phy_type(hw, low, 6, "1G_SGMII", prefix);
50 dump_phy_type(hw, low, 7, "2500BASE_T", prefix);
51 dump_phy_type(hw, low, 8, "2500BASE_X", prefix);
52 dump_phy_type(hw, low, 9, "2500BASE_KX", prefix);
53 dump_phy_type(hw, low, 10, "5GBASE_T", prefix);
54 dump_phy_type(hw, low, 11, "5GBASE_KR", prefix);
55 dump_phy_type(hw, low, 12, "10GBASE_T", prefix);
56 dump_phy_type(hw, low, 13, "10G_SFI_DA", prefix);
57 dump_phy_type(hw, low, 14, "10GBASE_SR", prefix);
58 dump_phy_type(hw, low, 15, "10GBASE_LR", prefix);
59 dump_phy_type(hw, low, 16, "10GBASE_KR_CR1", prefix);
60 dump_phy_type(hw, low, 17, "10G_SFI_AOC_ACC", prefix);
61 dump_phy_type(hw, low, 18, "10G_SFI_C2C", prefix);
62 dump_phy_type(hw, low, 19, "25GBASE_T", prefix);
63 dump_phy_type(hw, low, 20, "25GBASE_CR", prefix);
64 dump_phy_type(hw, low, 21, "25GBASE_CR_S", prefix);
65 dump_phy_type(hw, low, 22, "25GBASE_CR1", prefix);
66 dump_phy_type(hw, low, 23, "25GBASE_SR", prefix);
67 dump_phy_type(hw, low, 24, "25GBASE_LR", prefix);
68 dump_phy_type(hw, low, 25, "25GBASE_KR", prefix);
69 dump_phy_type(hw, low, 26, "25GBASE_KR_S", prefix);
70 dump_phy_type(hw, low, 27, "25GBASE_KR1", prefix);
71 dump_phy_type(hw, low, 28, "25G_AUI_AOC_ACC", prefix);
72 dump_phy_type(hw, low, 29, "25G_AUI_C2C", prefix);
73 dump_phy_type(hw, low, 30, "40GBASE_CR4", prefix);
74 dump_phy_type(hw, low, 31, "40GBASE_SR4", prefix);
75 dump_phy_type(hw, low, 32, "40GBASE_LR4", prefix);
76 dump_phy_type(hw, low, 33, "40GBASE_KR4", prefix);
77 dump_phy_type(hw, low, 34, "40G_XLAUI_AOC_ACC", prefix);
78 dump_phy_type(hw, low, 35, "40G_XLAUI", prefix);
79 dump_phy_type(hw, low, 36, "50GBASE_CR2", prefix);
80 dump_phy_type(hw, low, 37, "50GBASE_SR2", prefix);
81 dump_phy_type(hw, low, 38, "50GBASE_LR2", prefix);
82 dump_phy_type(hw, low, 39, "50GBASE_KR2", prefix);
83 dump_phy_type(hw, low, 40, "50G_LAUI2_AOC_ACC", prefix);
84 dump_phy_type(hw, low, 41, "50G_LAUI2", prefix);
85 dump_phy_type(hw, low, 42, "50G_AUI2_AOC_ACC", prefix);
86 dump_phy_type(hw, low, 43, "50G_AUI2", prefix);
87 dump_phy_type(hw, low, 44, "50GBASE_CP", prefix);
88 dump_phy_type(hw, low, 45, "50GBASE_SR", prefix);
89 dump_phy_type(hw, low, 46, "50GBASE_FR", prefix);
90 dump_phy_type(hw, low, 47, "50GBASE_LR", prefix);
91 dump_phy_type(hw, low, 48, "50GBASE_KR_PAM4", prefix);
92 dump_phy_type(hw, low, 49, "50G_AUI1_AOC_ACC", prefix);
93 dump_phy_type(hw, low, 50, "50G_AUI1", prefix);
94 dump_phy_type(hw, low, 51, "100GBASE_CR4", prefix);
95 dump_phy_type(hw, low, 52, "100GBASE_SR4", prefix);
96 dump_phy_type(hw, low, 53, "100GBASE_LR4", prefix);
97 dump_phy_type(hw, low, 54, "100GBASE_KR4", prefix);
98 dump_phy_type(hw, low, 55, "100G_CAUI4_AOC_ACC", prefix);
99 dump_phy_type(hw, low, 56, "100G_CAUI4", prefix);
100 dump_phy_type(hw, low, 57, "100G_AUI4_AOC_ACC", prefix);
101 dump_phy_type(hw, low, 58, "100G_AUI4", prefix);
102 dump_phy_type(hw, low, 59, "100GBASE_CR_PAM4", prefix);
103 dump_phy_type(hw, low, 60, "100GBASE_KR_PAM4", prefix);
104 dump_phy_type(hw, low, 61, "100GBASE_CP2", prefix);
105 dump_phy_type(hw, low, 62, "100GBASE_SR2", prefix);
106 dump_phy_type(hw, low, 63, "100GBASE_DR", prefix);
110 * ice_dump_phy_type_high - helper function to dump phy_type_high
111 * @hw: pointer to the HW structure
112 * @high: 64 bit value for phy_type_high
113 * @prefix: prefix string to differentiate multiple dumps
116 ice_dump_phy_type_high(struct ice_hw *hw, u64 high, const char *prefix)
118 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
119 (unsigned long long)high);
121 dump_phy_type(hw, high, 0, "100GBASE_KR2_PAM4", prefix);
122 dump_phy_type(hw, high, 1, "100G_CAUI2_AOC_ACC", prefix);
123 dump_phy_type(hw, high, 2, "100G_CAUI2", prefix);
124 dump_phy_type(hw, high, 3, "100G_AUI2_AOC_ACC", prefix);
125 dump_phy_type(hw, high, 4, "100G_AUI2", prefix);
129 * ice_set_mac_type - Sets MAC type
130 * @hw: pointer to the HW structure
132 * This function sets the MAC type of the adapter based on the
133 * vendor ID and device ID stored in the HW structure.
135 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
137 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
139 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
140 return ICE_ERR_DEVICE_NOT_SUPPORTED;
142 switch (hw->device_id) {
143 case ICE_DEV_ID_E810C_BACKPLANE:
144 case ICE_DEV_ID_E810C_QSFP:
145 case ICE_DEV_ID_E810C_SFP:
146 case ICE_DEV_ID_E810_XXV_BACKPLANE:
147 case ICE_DEV_ID_E810_XXV_QSFP:
148 case ICE_DEV_ID_E810_XXV_SFP:
149 hw->mac_type = ICE_MAC_E810;
151 case ICE_DEV_ID_E822C_10G_BASE_T:
152 case ICE_DEV_ID_E822C_BACKPLANE:
153 case ICE_DEV_ID_E822C_QSFP:
154 case ICE_DEV_ID_E822C_SFP:
155 case ICE_DEV_ID_E822C_SGMII:
156 case ICE_DEV_ID_E822L_10G_BASE_T:
157 case ICE_DEV_ID_E822L_BACKPLANE:
158 case ICE_DEV_ID_E822L_SFP:
159 case ICE_DEV_ID_E822L_SGMII:
160 case ICE_DEV_ID_E823L_10G_BASE_T:
161 case ICE_DEV_ID_E823L_1GBE:
162 case ICE_DEV_ID_E823L_BACKPLANE:
163 case ICE_DEV_ID_E823L_QSFP:
164 case ICE_DEV_ID_E823L_SFP:
165 case ICE_DEV_ID_E823C_10G_BASE_T:
166 case ICE_DEV_ID_E823C_BACKPLANE:
167 case ICE_DEV_ID_E823C_QSFP:
168 case ICE_DEV_ID_E823C_SFP:
169 case ICE_DEV_ID_E823C_SGMII:
170 hw->mac_type = ICE_MAC_GENERIC;
173 hw->mac_type = ICE_MAC_UNKNOWN;
177 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
183 * @hw: pointer to the hardware structure
185 * returns true if mac_type is ICE_MAC_GENERIC, false if not
187 bool ice_is_generic_mac(struct ice_hw *hw)
189 return hw->mac_type == ICE_MAC_GENERIC;
194 * @hw: pointer to the hardware structure
196 * returns true if the device is E810 based, false if not.
198 bool ice_is_e810(struct ice_hw *hw)
200 return hw->mac_type == ICE_MAC_E810;
205 * @hw: pointer to the hardware structure
207 * returns true if the device is E810T based, false if not.
209 bool ice_is_e810t(struct ice_hw *hw)
211 switch (hw->device_id) {
212 case ICE_DEV_ID_E810C_SFP:
213 if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
214 hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
217 case ICE_DEV_ID_E810C_QSFP:
218 if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
229 * ice_clear_pf_cfg - Clear PF configuration
230 * @hw: pointer to the hardware structure
232 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
233 * configuration, flow director filters, etc.).
235 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
237 struct ice_aq_desc desc;
239 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
241 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
245 * ice_aq_manage_mac_read - manage MAC address read command
246 * @hw: pointer to the HW struct
247 * @buf: a virtual buffer to hold the manage MAC read response
248 * @buf_size: Size of the virtual buffer
249 * @cd: pointer to command details structure or NULL
251 * This function is used to return the per-PF station MAC address (0x0107).
252 * NOTE: Upon successful completion of this command, MAC address information
253 * is returned in the user-specified buffer, which should be interpreted as a
254 * "manage_mac_read" response.
255 * Response data such as the various MAC addresses is stored in the HW struct (port.mac).
256 * ice_discover_dev_caps is expected to be called before this function is called.
259 static enum ice_status
260 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
261 struct ice_sq_cd *cd)
263 struct ice_aqc_manage_mac_read_resp *resp;
264 struct ice_aqc_manage_mac_read *cmd;
265 struct ice_aq_desc desc;
266 enum ice_status status;
270 cmd = &desc.params.mac_read;
272 if (buf_size < sizeof(*resp))
273 return ICE_ERR_BUF_TOO_SHORT;
275 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
277 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
281 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
282 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
284 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
285 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
289 /* A single port can report up to two (LAN and WoL) addresses */
290 for (i = 0; i < cmd->num_addr; i++)
291 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
292 ice_memcpy(hw->port_info->mac.lan_addr,
293 resp[i].mac_addr, ETH_ALEN,
295 ice_memcpy(hw->port_info->mac.perm_addr,
297 ETH_ALEN, ICE_DMA_TO_NONDMA);
304 * ice_aq_get_phy_caps - returns PHY capabilities
305 * @pi: port information structure
306 * @qual_mods: report qualified modules
307 * @report_mode: report mode capabilities
308 * @pcaps: structure for PHY capabilities to be filled
309 * @cd: pointer to command details structure or NULL
311 * Returns the various PHY capabilities supported on the Port (0x0600)
314 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
315 struct ice_aqc_get_phy_caps_data *pcaps,
316 struct ice_sq_cd *cd)
318 struct ice_aqc_get_phy_caps *cmd;
319 u16 pcaps_size = sizeof(*pcaps);
320 struct ice_aq_desc desc;
321 enum ice_status status;
325 cmd = &desc.params.get_phy;
327 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
328 return ICE_ERR_PARAM;
331 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
332 !ice_fw_supports_report_dflt_cfg(hw))
333 return ICE_ERR_PARAM;
335 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
338 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
340 cmd->param0 |= CPU_TO_LE16(report_mode);
341 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
343 ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
345 if (report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA)
346 prefix = "phy_caps_media";
347 else if (report_mode == ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA)
348 prefix = "phy_caps_no_media";
349 else if (report_mode == ICE_AQC_REPORT_ACTIVE_CFG)
350 prefix = "phy_caps_active";
351 else if (report_mode == ICE_AQC_REPORT_DFLT_CFG)
352 prefix = "phy_caps_default";
354 prefix = "phy_caps_invalid";
356 ice_dump_phy_type_low(hw, LE64_TO_CPU(pcaps->phy_type_low), prefix);
357 ice_dump_phy_type_high(hw, LE64_TO_CPU(pcaps->phy_type_high), prefix);
359 ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
360 prefix, report_mode);
361 ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
362 ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
363 pcaps->low_power_ctrl_an);
364 ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
366 ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
368 ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
369 pcaps->link_fec_options);
370 ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
371 prefix, pcaps->module_compliance_enforcement);
372 ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
373 prefix, pcaps->extended_compliance_code);
374 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
375 pcaps->module_type[0]);
376 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
377 pcaps->module_type[1]);
378 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
379 pcaps->module_type[2]);
381 if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
382 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
383 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
384 ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
385 sizeof(pi->phy.link_info.module_type),
386 ICE_NONDMA_TO_NONDMA);
393 * ice_aq_get_link_topo_handle - get link topology node return status
394 * @pi: port information structure
395 * @node_type: requested node type
396 * @cd: pointer to command details structure or NULL
398 * Get link topology node return status for specified node type (0x06E0)
400 * Node type cage can be used to determine if cage is present. If AQC
401 * returns error (ENOENT), then no cage present. If no cage present, then
402 * connection type is backplane or BASE-T.
404 static enum ice_status
405 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
406 struct ice_sq_cd *cd)
408 struct ice_aqc_get_link_topo *cmd;
409 struct ice_aq_desc desc;
411 cmd = &desc.params.get_link_topo;
413 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
415 cmd->addr.topo_params.node_type_ctx =
416 (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
417 ICE_AQC_LINK_TOPO_NODE_CTX_S);
420 cmd->addr.topo_params.node_type_ctx |=
421 (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
423 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
427 * ice_is_media_cage_present
428 * @pi: port information structure
430 * Returns true if media cage is present, else false. If no cage, then
431 * media type is backplane or BASE-T.
433 static bool ice_is_media_cage_present(struct ice_port_info *pi)
435 /* Node type cage can be used to determine if cage is present. If AQC
436 * returns error (ENOENT), then no cage present. If no cage present then
437 * connection type is backplane or BASE-T.
439 return !ice_aq_get_link_topo_handle(pi,
440 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
445 * ice_get_media_type - Gets media type
446 * @pi: port information structure
448 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
450 struct ice_link_status *hw_link_info;
453 return ICE_MEDIA_UNKNOWN;
455 hw_link_info = &pi->phy.link_info;
456 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
457 /* If more than one media type is selected, report unknown */
458 return ICE_MEDIA_UNKNOWN;
460 if (hw_link_info->phy_type_low) {
461 /* 1G SGMII is a special case where some DA cable PHYs
462 * may show this as an option when it really shouldn't
463 * be since SGMII is meant to be between a MAC and a PHY
464 * in a backplane. Try to detect this case and handle it
466 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
467 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
468 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
469 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
470 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
473 switch (hw_link_info->phy_type_low) {
474 case ICE_PHY_TYPE_LOW_1000BASE_SX:
475 case ICE_PHY_TYPE_LOW_1000BASE_LX:
476 case ICE_PHY_TYPE_LOW_10GBASE_SR:
477 case ICE_PHY_TYPE_LOW_10GBASE_LR:
478 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
479 case ICE_PHY_TYPE_LOW_25GBASE_SR:
480 case ICE_PHY_TYPE_LOW_25GBASE_LR:
481 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
482 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
483 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
484 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
485 case ICE_PHY_TYPE_LOW_50GBASE_SR:
486 case ICE_PHY_TYPE_LOW_50GBASE_FR:
487 case ICE_PHY_TYPE_LOW_50GBASE_LR:
488 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
489 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
490 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
491 case ICE_PHY_TYPE_LOW_100GBASE_DR:
492 return ICE_MEDIA_FIBER;
493 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
494 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
495 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
496 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
497 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
498 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
499 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
500 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
501 return ICE_MEDIA_FIBER;
502 case ICE_PHY_TYPE_LOW_100BASE_TX:
503 case ICE_PHY_TYPE_LOW_1000BASE_T:
504 case ICE_PHY_TYPE_LOW_2500BASE_T:
505 case ICE_PHY_TYPE_LOW_5GBASE_T:
506 case ICE_PHY_TYPE_LOW_10GBASE_T:
507 case ICE_PHY_TYPE_LOW_25GBASE_T:
508 return ICE_MEDIA_BASET;
509 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
510 case ICE_PHY_TYPE_LOW_25GBASE_CR:
511 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
512 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
513 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
514 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
515 case ICE_PHY_TYPE_LOW_50GBASE_CP:
516 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
517 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
518 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
520 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
521 case ICE_PHY_TYPE_LOW_40G_XLAUI:
522 case ICE_PHY_TYPE_LOW_50G_LAUI2:
523 case ICE_PHY_TYPE_LOW_50G_AUI2:
524 case ICE_PHY_TYPE_LOW_50G_AUI1:
525 case ICE_PHY_TYPE_LOW_100G_AUI4:
526 case ICE_PHY_TYPE_LOW_100G_CAUI4:
527 if (ice_is_media_cage_present(pi))
528 return ICE_MEDIA_AUI;
530 case ICE_PHY_TYPE_LOW_1000BASE_KX:
531 case ICE_PHY_TYPE_LOW_2500BASE_KX:
532 case ICE_PHY_TYPE_LOW_2500BASE_X:
533 case ICE_PHY_TYPE_LOW_5GBASE_KR:
534 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
535 case ICE_PHY_TYPE_LOW_25GBASE_KR:
536 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
537 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
538 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
539 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
540 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
541 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
542 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
543 return ICE_MEDIA_BACKPLANE;
546 switch (hw_link_info->phy_type_high) {
547 case ICE_PHY_TYPE_HIGH_100G_AUI2:
548 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
549 if (ice_is_media_cage_present(pi))
550 return ICE_MEDIA_AUI;
552 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
553 return ICE_MEDIA_BACKPLANE;
554 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
555 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
556 return ICE_MEDIA_FIBER;
559 return ICE_MEDIA_UNKNOWN;
563 * ice_aq_get_link_info
564 * @pi: port information structure
565 * @ena_lse: enable/disable LinkStatusEvent reporting
566 * @link: pointer to link status structure - optional
567 * @cd: pointer to command details structure or NULL
569 * Get Link Status (0x0607). Returns the link status of the adapter.
572 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
573 struct ice_link_status *link, struct ice_sq_cd *cd)
575 struct ice_aqc_get_link_status_data link_data = { 0 };
576 struct ice_aqc_get_link_status *resp;
577 struct ice_link_status *li_old, *li;
578 enum ice_media_type *hw_media_type;
579 struct ice_fc_info *hw_fc_info;
580 bool tx_pause, rx_pause;
581 struct ice_aq_desc desc;
582 enum ice_status status;
587 return ICE_ERR_PARAM;
589 li_old = &pi->phy.link_info_old;
590 hw_media_type = &pi->phy.media_type;
591 li = &pi->phy.link_info;
592 hw_fc_info = &pi->fc;
594 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
595 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
596 resp = &desc.params.get_link_status;
597 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
598 resp->lport_num = pi->lport;
600 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
602 if (status != ICE_SUCCESS)
605 /* save off old link status information */
608 /* update current link status information */
609 li->link_speed = LE16_TO_CPU(link_data.link_speed);
610 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
611 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
612 *hw_media_type = ice_get_media_type(pi);
613 li->link_info = link_data.link_info;
614 li->link_cfg_err = link_data.link_cfg_err;
615 li->an_info = link_data.an_info;
616 li->ext_info = link_data.ext_info;
617 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
618 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
619 li->topo_media_conflict = link_data.topo_media_conflict;
620 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
621 ICE_AQ_CFG_PACING_TYPE_M);
624 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
625 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
626 if (tx_pause && rx_pause)
627 hw_fc_info->current_mode = ICE_FC_FULL;
629 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
631 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
633 hw_fc_info->current_mode = ICE_FC_NONE;
635 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
637 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
638 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
639 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
640 (unsigned long long)li->phy_type_low);
641 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
642 (unsigned long long)li->phy_type_high);
643 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
644 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
645 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
646 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
647 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
648 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
649 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
650 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
652 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
654 /* save link status information */
658 /* flag cleared so calling functions don't call AQ again */
659 pi->phy.get_link_info = false;
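/* Illustrative sketch (not part of this file): callers typically gate the AQ
 * round-trip on the get_link_info flag that is cleared above, along the lines
 * of the following; "pi" is a hypothetical struct ice_port_info pointer.
 *
 *	if (pi->phy.get_link_info) {
 *		enum ice_status status;
 *
 *		status = ice_aq_get_link_info(pi, false, NULL, NULL);
 *		if (status)
 *			return status;
 *	}
 */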
665 * ice_fill_tx_timer_and_fc_thresh
666 * @hw: pointer to the HW struct
667 * @cmd: pointer to MAC cfg structure
669 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
673 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
674 struct ice_aqc_set_mac_cfg *cmd)
676 u16 fc_thres_val, tx_timer_val;
679 /* We read back the transmit timer and fc threshold value of
680 * LFC. Thus, we will use index =
681 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
683 * Also, because we are operating on the transmit timer and FC
684 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
686 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
688 /* Retrieve the transmit timer */
689 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
691 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
692 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
694 /* Retrieve the fc threshold */
695 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
696 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
698 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
703 * @hw: pointer to the HW struct
704 * @max_frame_size: Maximum Frame Size to be supported
705 * @cd: pointer to command details structure or NULL
707 * Set MAC configuration (0x0603)
710 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
712 struct ice_aqc_set_mac_cfg *cmd;
713 struct ice_aq_desc desc;
715 cmd = &desc.params.set_mac_cfg;
717 if (max_frame_size == 0)
718 return ICE_ERR_PARAM;
720 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
722 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
724 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
726 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
730 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
731 * @hw: pointer to the HW struct
733 enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
735 struct ice_switch_info *sw;
736 enum ice_status status;
738 hw->switch_info = (struct ice_switch_info *)
739 ice_malloc(hw, sizeof(*hw->switch_info));
741 sw = hw->switch_info;
744 return ICE_ERR_NO_MEMORY;
746 INIT_LIST_HEAD(&sw->vsi_list_map_head);
747 sw->prof_res_bm_init = 0;
749 status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
751 ice_free(hw, hw->switch_info);
758 * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
759 * @hw: pointer to the HW struct
760 * @sw: pointer to switch info struct for which function clears filters
763 ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
765 struct ice_vsi_list_map_info *v_pos_map;
766 struct ice_vsi_list_map_info *v_tmp_map;
767 struct ice_sw_recipe *recps;
773 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
774 ice_vsi_list_map_info, list_entry) {
775 LIST_DEL(&v_pos_map->list_entry);
776 ice_free(hw, v_pos_map);
778 recps = sw->recp_list;
779 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
780 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
782 recps[i].root_rid = i;
783 LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
784 &recps[i].rg_list, ice_recp_grp_entry,
786 LIST_DEL(&rg_entry->l_entry);
787 ice_free(hw, rg_entry);
790 if (recps[i].adv_rule) {
791 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
792 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
794 ice_destroy_lock(&recps[i].filt_rule_lock);
795 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
796 &recps[i].filt_rules,
797 ice_adv_fltr_mgmt_list_entry,
799 LIST_DEL(&lst_itr->list_entry);
800 ice_free(hw, lst_itr->lkups);
801 ice_free(hw, lst_itr);
804 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
806 ice_destroy_lock(&recps[i].filt_rule_lock);
807 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
808 &recps[i].filt_rules,
809 ice_fltr_mgmt_list_entry,
811 LIST_DEL(&lst_itr->list_entry);
812 ice_free(hw, lst_itr);
815 if (recps[i].root_buf)
816 ice_free(hw, recps[i].root_buf);
818 ice_rm_sw_replay_rule_info(hw, sw);
819 ice_free(hw, sw->recp_list);
824 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
825 * @hw: pointer to the HW struct
827 void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
829 ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
833 * ice_get_itr_intrl_gran
834 * @hw: pointer to the HW struct
836 * Determines the ITR/INTRL granularities based on the maximum aggregate
837 * bandwidth according to the device's configuration during power-on.
839 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
841 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
842 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
843 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
845 switch (max_agg_bw) {
846 case ICE_MAX_AGG_BW_200G:
847 case ICE_MAX_AGG_BW_100G:
848 case ICE_MAX_AGG_BW_50G:
849 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
850 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
852 case ICE_MAX_AGG_BW_25G:
853 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
854 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
860 * ice_print_rollback_msg - print FW rollback message
861 * @hw: pointer to the hardware structure
863 void ice_print_rollback_msg(struct ice_hw *hw)
865 char nvm_str[ICE_NVM_VER_LEN] = { 0 };
866 struct ice_orom_info *orom;
867 struct ice_nvm_info *nvm;
869 orom = &hw->flash.orom;
870 nvm = &hw->flash.nvm;
872 SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
873 nvm->major, nvm->minor, nvm->eetrack, orom->major,
874 orom->build, orom->patch);
876 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
877 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
881 * ice_set_umac_shared
882 * @hw: pointer to the hw struct
884 * Set boolean flag to allow unicast MAC sharing
886 void ice_set_umac_shared(struct ice_hw *hw)
888 hw->umac_shared = true;
892 * ice_init_hw - main hardware initialization routine
893 * @hw: pointer to the hardware structure
895 enum ice_status ice_init_hw(struct ice_hw *hw)
897 struct ice_aqc_get_phy_caps_data *pcaps;
898 enum ice_status status;
902 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
904 /* Set MAC type based on DeviceID */
905 status = ice_set_mac_type(hw);
909 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
910 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
911 PF_FUNC_RID_FUNCTION_NUMBER_S;
913 status = ice_reset(hw, ICE_RESET_PFR);
917 ice_get_itr_intrl_gran(hw);
919 status = ice_create_all_ctrlq(hw);
921 goto err_unroll_cqinit;
923 status = ice_init_nvm(hw);
925 goto err_unroll_cqinit;
927 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
928 ice_print_rollback_msg(hw);
930 status = ice_clear_pf_cfg(hw);
932 goto err_unroll_cqinit;
934 /* Set bit to enable Flow Director filters */
935 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
936 INIT_LIST_HEAD(&hw->fdir_list_head);
938 ice_clear_pxe_mode(hw);
940 status = ice_get_caps(hw);
942 goto err_unroll_cqinit;
944 hw->port_info = (struct ice_port_info *)
945 ice_malloc(hw, sizeof(*hw->port_info));
946 if (!hw->port_info) {
947 status = ICE_ERR_NO_MEMORY;
948 goto err_unroll_cqinit;
951 /* set the back pointer to HW */
952 hw->port_info->hw = hw;
954 /* Initialize port_info struct with switch configuration data */
955 status = ice_get_initial_sw_cfg(hw);
957 goto err_unroll_alloc;
960 /* Query the allocated resources for Tx scheduler */
961 status = ice_sched_query_res_alloc(hw);
963 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
964 goto err_unroll_alloc;
966 ice_sched_get_psm_clk_freq(hw);
968 /* Initialize port_info struct with scheduler data */
969 status = ice_sched_init_port(hw->port_info);
971 goto err_unroll_sched;
972 pcaps = (struct ice_aqc_get_phy_caps_data *)
973 ice_malloc(hw, sizeof(*pcaps));
975 status = ICE_ERR_NO_MEMORY;
976 goto err_unroll_sched;
979 /* Initialize port_info struct with PHY capabilities */
980 status = ice_aq_get_phy_caps(hw->port_info, false,
981 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
984 ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
987 /* Initialize port_info struct with link information */
988 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
990 goto err_unroll_sched;
991 /* need a valid SW entry point to build a Tx tree */
992 if (!hw->sw_entry_point_layer) {
993 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
994 status = ICE_ERR_CFG;
995 goto err_unroll_sched;
997 INIT_LIST_HEAD(&hw->agg_list);
998 /* Initialize max burst size */
999 if (!hw->max_burst_size)
1000 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
1001 status = ice_init_fltr_mgmt_struct(hw);
1003 goto err_unroll_sched;
1005 /* Get MAC information */
1006 /* A single port can report up to two (LAN and WoL) addresses */
1007 mac_buf = ice_calloc(hw, 2,
1008 sizeof(struct ice_aqc_manage_mac_read_resp));
1009 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
1012 status = ICE_ERR_NO_MEMORY;
1013 goto err_unroll_fltr_mgmt_struct;
1016 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
1017 ice_free(hw, mac_buf);
1020 goto err_unroll_fltr_mgmt_struct;
1022 /* enable jumbo frame support at MAC level */
1023 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
1025 goto err_unroll_fltr_mgmt_struct;
1027 /* Obtain counter base index which would be used by flow director */
1028 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
1030 goto err_unroll_fltr_mgmt_struct;
1031 status = ice_init_hw_tbls(hw);
1033 goto err_unroll_fltr_mgmt_struct;
1034 ice_init_lock(&hw->tnl_lock);
1038 err_unroll_fltr_mgmt_struct:
1039 ice_cleanup_fltr_mgmt_struct(hw);
1041 ice_sched_cleanup_all(hw);
1043 ice_free(hw, hw->port_info);
1044 hw->port_info = NULL;
1046 ice_destroy_all_ctrlq(hw);
1051 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
1052 * @hw: pointer to the hardware structure
1054 * This should be called only during nominal operation, not as a result of
1055 * ice_init_hw() failing, since ice_init_hw() will take care of unrolling
1056 * applicable initializations if it fails for any reason.
1058 void ice_deinit_hw(struct ice_hw *hw)
1060 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1061 ice_cleanup_fltr_mgmt_struct(hw);
1063 ice_sched_cleanup_all(hw);
1064 ice_sched_clear_agg(hw);
1066 ice_free_hw_tbls(hw);
1067 ice_destroy_lock(&hw->tnl_lock);
1069 if (hw->port_info) {
1070 ice_free(hw, hw->port_info);
1071 hw->port_info = NULL;
1074 ice_destroy_all_ctrlq(hw);
1076 /* Clear VSI contexts if not already cleared */
1077 ice_clear_all_vsi_ctx(hw);
1081 * ice_check_reset - Check to see if a global reset is complete
1082 * @hw: pointer to the hardware structure
1084 enum ice_status ice_check_reset(struct ice_hw *hw)
1086 u32 cnt, reg = 0, grst_timeout, uld_mask;
1088 /* Poll for Device Active state in case a recent CORER, GLOBR,
1089 * or EMPR has occurred. The grst delay value is in 100ms units.
1090 * Add 1sec for outstanding AQ commands that can take a long time.
1092 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1093 GLGEN_RSTCTL_GRSTDEL_S) + 10;
1095 for (cnt = 0; cnt < grst_timeout; cnt++) {
1096 ice_msec_delay(100, true);
1097 reg = rd32(hw, GLGEN_RSTAT);
1098 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1102 if (cnt == grst_timeout) {
1103 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1104 return ICE_ERR_RESET_FAILED;
1107 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1108 GLNVM_ULD_PCIER_DONE_1_M |\
1109 GLNVM_ULD_CORER_DONE_M |\
1110 GLNVM_ULD_GLOBR_DONE_M |\
1111 GLNVM_ULD_POR_DONE_M |\
1112 GLNVM_ULD_POR_DONE_1_M |\
1113 GLNVM_ULD_PCIER_DONE_2_M)
1115 uld_mask = ICE_RESET_DONE_MASK;
1117 /* Device is Active; check Global Reset processes are done */
1118 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1119 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1120 if (reg == uld_mask) {
1121 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1124 ice_msec_delay(10, true);
1127 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1128 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1130 return ICE_ERR_RESET_FAILED;
1137 * ice_pf_reset - Reset the PF
1138 * @hw: pointer to the hardware structure
1140 * If a global reset has been triggered, this function checks
1141 * for its completion and then issues the PF reset
1143 static enum ice_status ice_pf_reset(struct ice_hw *hw)
1147 /* If at function entry a global reset was already in progress, i.e.
1148 * state is not 'device active' or any of the reset done bits are not
1149 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1150 * global reset is done.
1152 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1153 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1154 /* poll on global reset currently in progress until done */
1155 if (ice_check_reset(hw))
1156 return ICE_ERR_RESET_FAILED;
1162 reg = rd32(hw, PFGEN_CTRL);
1164 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1166 /* Wait for the PFR to complete. The wait time is the global config lock
1167 * timeout plus the PFR timeout which will account for a possible reset
1168 * that is occurring during a download package operation.
1170 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1171 ICE_PF_RESET_WAIT_COUNT; cnt++) {
1172 reg = rd32(hw, PFGEN_CTRL);
1173 if (!(reg & PFGEN_CTRL_PFSWR_M))
1176 ice_msec_delay(1, true);
1179 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1180 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1181 return ICE_ERR_RESET_FAILED;
1188 * ice_reset - Perform different types of reset
1189 * @hw: pointer to the hardware structure
1190 * @req: reset request
1192 * This function triggers a reset as specified by the req parameter.
1195 * If anything other than a PF reset is triggered, PXE mode is restored.
1196 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1197 * interface has been restored in the rebuild flow.
1199 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1205 return ice_pf_reset(hw);
1206 case ICE_RESET_CORER:
1207 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1208 val = GLGEN_RTRIG_CORER_M;
1210 case ICE_RESET_GLOBR:
1211 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1212 val = GLGEN_RTRIG_GLOBR_M;
1215 return ICE_ERR_PARAM;
1218 val |= rd32(hw, GLGEN_RTRIG);
1219 wr32(hw, GLGEN_RTRIG, val);
1222 /* wait for the FW to be ready */
1223 return ice_check_reset(hw);
1227 * ice_copy_rxq_ctx_to_hw
1228 * @hw: pointer to the hardware structure
1229 * @ice_rxq_ctx: pointer to the rxq context
1230 * @rxq_index: the index of the Rx queue
1232 * Copies rxq context from dense structure to HW register space
1234 static enum ice_status
1235 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1240 return ICE_ERR_BAD_PTR;
1242 if (rxq_index > QRX_CTRL_MAX_INDEX)
1243 return ICE_ERR_PARAM;
1245 /* Copy each dword separately to HW */
1246 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1247 wr32(hw, QRX_CONTEXT(i, rxq_index),
1248 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1250 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1251 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1257 /* LAN Rx Queue Context */
1258 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1259 /* Field Width LSB */
1260 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1261 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1262 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1263 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1264 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1265 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1266 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1267 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1268 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1269 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1270 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1271 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1272 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1273 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1274 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1275 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1276 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1277 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1278 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1279 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1285 * @hw: pointer to the hardware structure
1286 * @rlan_ctx: pointer to the rxq context
1287 * @rxq_index: the index of the Rx queue
1289 * Converts the rxq context from sparse to dense structure, writes it to HW
1290 * register space, and enables the hardware to prefetch descriptors instead
1291 * of only fetching them on demand
1294 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1297 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1300 return ICE_ERR_BAD_PTR;
1302 rlan_ctx->prefena = 1;
1304 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1305 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
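/* Illustrative sketch (not part of this file): a caller would zero a struct
 * ice_rlan_ctx, fill the fields listed in ice_rlan_ctx_info above and pass it
 * to ice_write_rxq_ctx. The local names and the shift/descriptor-size values
 * below are assumptions for the example only.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;		(assumed 128-byte units)
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;
 *	rlan_ctx.dsize = 1;			(assumed 32-byte descriptors)
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */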
1310 * @hw: pointer to the hardware structure
1311 * @rxq_index: the index of the Rx queue to clear
1313 * Clears rxq context in HW register space
1315 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1319 if (rxq_index > QRX_CTRL_MAX_INDEX)
1320 return ICE_ERR_PARAM;
1322 /* Clear each dword register separately */
1323 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1324 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1329 /* LAN Tx Queue Context */
1330 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1331 /* Field Width LSB */
1332 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1333 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1334 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1335 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1336 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1337 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1338 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1339 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1340 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1341 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1342 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1343 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1344 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1345 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1346 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1347 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1348 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1349 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1350 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1351 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1352 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1353 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1354 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1355 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1356 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1357 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1358 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1359 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1360 ICE_CTX_STORE(ice_tlan_ctx, gsc_ena, 1, 172),
1365 * ice_copy_tx_cmpltnq_ctx_to_hw
1366 * @hw: pointer to the hardware structure
1367 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1368 * @tx_cmpltnq_index: the index of the completion queue
1370 * Copies Tx completion queue context from dense structure to HW register space
1372 static enum ice_status
1373 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1374 u32 tx_cmpltnq_index)
1378 if (!ice_tx_cmpltnq_ctx)
1379 return ICE_ERR_BAD_PTR;
1381 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1382 return ICE_ERR_PARAM;
1384 /* Copy each dword separately to HW */
1385 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1386 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1387 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1389 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1390 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1396 /* LAN Tx Completion Queue Context */
1397 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1398 /* Field Width LSB */
1399 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1400 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1401 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1402 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1403 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1404 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1405 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1406 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1407 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1408 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1413 * ice_write_tx_cmpltnq_ctx
1414 * @hw: pointer to the hardware structure
1415 * @tx_cmpltnq_ctx: pointer to the completion queue context
1416 * @tx_cmpltnq_index: the index of the completion queue
1418 * Converts completion queue context from sparse to dense structure and then
1419 * writes it to HW register space
1422 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1423 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1424 u32 tx_cmpltnq_index)
1426 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1428 ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1429 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1433 * ice_clear_tx_cmpltnq_ctx
1434 * @hw: pointer to the hardware structure
1435 * @tx_cmpltnq_index: the index of the completion queue to clear
1437 * Clears Tx completion queue context in HW register space
1440 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1444 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1445 return ICE_ERR_PARAM;
1447 /* Clear each dword register separately */
1448 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1449 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1455 * ice_copy_tx_drbell_q_ctx_to_hw
1456 * @hw: pointer to the hardware structure
1457 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1458 * @tx_drbell_q_index: the index of the doorbell queue
1460 * Copies doorbell queue context from dense structure to HW register space
1462 static enum ice_status
1463 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1464 u32 tx_drbell_q_index)
1468 if (!ice_tx_drbell_q_ctx)
1469 return ICE_ERR_BAD_PTR;
1471 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1472 return ICE_ERR_PARAM;
1474 /* Copy each dword separately to HW */
1475 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1476 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1477 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1479 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1480 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1486 /* LAN Tx Doorbell Queue Context info */
1487 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1488 /* Field Width LSB */
1489 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1490 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1491 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1492 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1493 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1494 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1495 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1496 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1497 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1498 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1499 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1504 * ice_write_tx_drbell_q_ctx
1505 * @hw: pointer to the hardware structure
1506 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1507 * @tx_drbell_q_index: the index of the doorbell queue
1509 * Converts doorbell queue context from sparse to dense structure and then
1510 * writes it to HW register space
1513 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1514 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1515 u32 tx_drbell_q_index)
1517 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1519 ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1520 ice_tx_drbell_q_ctx_info);
1521 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1525 * ice_clear_tx_drbell_q_ctx
1526 * @hw: pointer to the hardware structure
1527 * @tx_drbell_q_index: the index of the doorbell queue to clear
1529 * Clears doorbell queue context in HW register space
1532 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1536 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1537 return ICE_ERR_PARAM;
1539 /* Clear each dword register separately */
1540 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1541 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1546 /* Sideband Queue command wrappers */
1549 * ice_get_sbq - returns the right control queue to use for sideband
1550 * @hw: pointer to the hardware structure
1552 static struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
1554 if (!ice_is_generic_mac(hw))
1560 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
1561 * @hw: pointer to the HW struct
1562 * @desc: descriptor describing the command
1563 * @buf: buffer to use for indirect commands (NULL for direct commands)
1564 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1565 * @cd: pointer to command details structure
1567 static enum ice_status
1568 ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1569 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1571 return ice_sq_send_cmd(hw, ice_get_sbq(hw), (struct ice_aq_desc *)desc,
1576 * ice_sbq_send_cmd_nolock - send Sideband Queue command to Sideband Queue
1577 * but do not lock sq_lock
1578 * @hw: pointer to the HW struct
1579 * @desc: descriptor describing the command
1580 * @buf: buffer to use for indirect commands (NULL for direct commands)
1581 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1582 * @cd: pointer to command details structure
1584 static enum ice_status
1585 ice_sbq_send_cmd_nolock(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1586 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1588 return ice_sq_send_cmd_nolock(hw, ice_get_sbq(hw),
1589 (struct ice_aq_desc *)desc, buf,
1594 * ice_sbq_rw_reg_lp - Fill Sideband Queue command, with lock parameter
1595 * @hw: pointer to the HW struct
1596 * @in: message info to be filled in descriptor
1597 * @lock: true to lock the sq_lock (the usual case); false if the sq_lock has
1598 * already been locked at a higher level
1600 enum ice_status ice_sbq_rw_reg_lp(struct ice_hw *hw,
1601 struct ice_sbq_msg_input *in, bool lock)
1603 struct ice_sbq_cmd_desc desc = {0};
1604 struct ice_sbq_msg_req msg = {0};
1605 enum ice_status status;
1608 msg_len = sizeof(msg);
1610 msg.dest_dev = in->dest_dev;
1611 msg.opcode = in->opcode;
1612 msg.flags = ICE_SBQ_MSG_FLAGS;
1613 msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1614 msg.msg_addr_low = CPU_TO_LE16(in->msg_addr_low);
1615 msg.msg_addr_high = CPU_TO_LE32(in->msg_addr_high);
1618 msg.data = CPU_TO_LE32(in->data);
1620 /* data read comes back in completion, so shorten the struct by the size of the data field */
1623 msg_len -= sizeof(msg.data);
1625 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
1626 desc.opcode = CPU_TO_LE16(ice_sbq_opc_neigh_dev_req);
1627 desc.param0.cmd_len = CPU_TO_LE16(msg_len);
1629 status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1631 status = ice_sbq_send_cmd_nolock(hw, &desc, &msg, msg_len,
1633 if (!status && !in->opcode)
1634 in->data = LE32_TO_CPU
1635 (((struct ice_sbq_msg_cmpl *)&msg)->data);
1640 * ice_sbq_rw_reg - Fill Sideband Queue command
1641 * @hw: pointer to the HW struct
1642 * @in: message info to be filled in descriptor
1644 enum ice_status ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
1646 return ice_sbq_rw_reg_lp(hw, in, true);
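/* Illustrative sketch (not part of this file): a register read over the
 * sideband queue fills a struct ice_sbq_msg_input and calls ice_sbq_rw_reg;
 * an opcode of 0 is handled as a read above, so the result comes back in
 * in.data. The dest_dev and address values here are placeholders.
 *
 *	struct ice_sbq_msg_input in = { 0 };
 *
 *	in.dest_dev = <destination device id>;
 *	in.opcode = 0;				(read; a write would also set in.data)
 *	in.msg_addr_low = <register address, low part>;
 *	in.msg_addr_high = <register address, high part>;
 *	status = ice_sbq_rw_reg(hw, &in);
 *	if (!status)
 *		val = in.data;
 */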
1650 * ice_sbq_lock - Lock the sideband queue's sq_lock
1651 * @hw: pointer to the HW struct
1653 void ice_sbq_lock(struct ice_hw *hw)
1655 ice_acquire_lock(&ice_get_sbq(hw)->sq_lock);
1659 * ice_sbq_unlock - Unlock the sideband queue's sq_lock
1660 * @hw: pointer to the HW struct
1662 void ice_sbq_unlock(struct ice_hw *hw)
1664 ice_release_lock(&ice_get_sbq(hw)->sq_lock);
1667 /* FW Admin Queue command wrappers */
1670 * ice_should_retry_sq_send_cmd
1671 * @opcode: AQ opcode
1673 * Decide if we should retry the send command routine for the ATQ, depending on the AQ opcode.
1676 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1679 case ice_aqc_opc_get_link_topo:
1680 case ice_aqc_opc_lldp_stop:
1681 case ice_aqc_opc_lldp_start:
1682 case ice_aqc_opc_lldp_filter_ctrl:
1690 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1691 * @hw: pointer to the HW struct
1692 * @cq: pointer to the specific Control queue
1693 * @desc: prefilled descriptor describing the command
1694 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1695 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1696 * @cd: pointer to command details structure
1698 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1699 * Queue if the EBUSY AQ error is returned.
1701 static enum ice_status
1702 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1703 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1704 struct ice_sq_cd *cd)
1706 struct ice_aq_desc desc_cpy;
1707 enum ice_status status;
1708 bool is_cmd_for_retry;
1713 opcode = LE16_TO_CPU(desc->opcode);
1714 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1715 ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
1717 if (is_cmd_for_retry) {
1719 buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1721 return ICE_ERR_NO_MEMORY;
1724 ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1725 ICE_NONDMA_TO_NONDMA);
1729 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1731 if (!is_cmd_for_retry || status == ICE_SUCCESS ||
1732 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1736 ice_memcpy(buf, buf_cpy, buf_size,
1737 ICE_NONDMA_TO_NONDMA);
1739 ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1740 ICE_NONDMA_TO_NONDMA);
1742 ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1744 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1747 ice_free(hw, buf_cpy);
1753 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1754 * @hw: pointer to the HW struct
1755 * @desc: descriptor describing the command
1756 * @buf: buffer to use for indirect commands (NULL for direct commands)
1757 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1758 * @cd: pointer to command details structure
1760 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1763 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1764 u16 buf_size, struct ice_sq_cd *cd)
1766 if (hw->aq_send_cmd_fn) {
1767 enum ice_status status = ICE_ERR_NOT_READY;
1768 u16 retval = ICE_AQ_RC_OK;
1770 ice_acquire_lock(&hw->adminq.sq_lock);
1771 if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc,
1773 retval = LE16_TO_CPU(desc->retval);
1774 /* strip off FW internal code */
1777 if (retval == ICE_AQ_RC_OK)
1778 status = ICE_SUCCESS;
1780 status = ICE_ERR_AQ_ERROR;
1783 hw->adminq.sq_last_status = (enum ice_aq_err)retval;
1784 ice_release_lock(&hw->adminq.sq_lock);
1788 return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1793 * @hw: pointer to the HW struct
1794 * @cd: pointer to command details structure or NULL
1796 * Get the firmware version (0x0001) from the admin queue commands
1798 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1800 struct ice_aqc_get_ver *resp;
1801 struct ice_aq_desc desc;
1802 enum ice_status status;
1804 resp = &desc.params.get_ver;
1806 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1808 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1811 hw->fw_branch = resp->fw_branch;
1812 hw->fw_maj_ver = resp->fw_major;
1813 hw->fw_min_ver = resp->fw_minor;
1814 hw->fw_patch = resp->fw_patch;
1815 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1816 hw->api_branch = resp->api_branch;
1817 hw->api_maj_ver = resp->api_major;
1818 hw->api_min_ver = resp->api_minor;
1819 hw->api_patch = resp->api_patch;
1826 * ice_aq_send_driver_ver
1827 * @hw: pointer to the HW struct
1828 * @dv: driver's major, minor version
1829 * @cd: pointer to command details structure or NULL
1831 * Send the driver version (0x0002) to the firmware
1834 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1835 struct ice_sq_cd *cd)
1837 struct ice_aqc_driver_ver *cmd;
1838 struct ice_aq_desc desc;
1841 cmd = &desc.params.driver_ver;
1844 return ICE_ERR_PARAM;
1846 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1848 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1849 cmd->major_ver = dv->major_ver;
1850 cmd->minor_ver = dv->minor_ver;
1851 cmd->build_ver = dv->build_ver;
1852 cmd->subbuild_ver = dv->subbuild_ver;
1855 while (len < sizeof(dv->driver_string) &&
1856 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1859 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1864 * @hw: pointer to the HW struct
1865 * @unloading: is the driver unloading itself
1867 * Tell the Firmware that we're shutting down the AdminQ and whether
1868 * or not the driver is unloading as well (0x0003).
1870 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1872 struct ice_aqc_q_shutdown *cmd;
1873 struct ice_aq_desc desc;
1875 cmd = &desc.params.q_shutdown;
1877 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1880 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1882 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1887 * @hw: pointer to the HW struct
1889 * @access: access type
1890 * @sdp_number: resource number
1891 * @timeout: the maximum time in ms that the driver may hold the resource
1892 * @cd: pointer to command details structure or NULL
1894 * Requests a common resource using the admin queue commands (0x0008).
1895 * When attempting to acquire the Global Config Lock, the driver can
1896 * learn of three states:
1897 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1898 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1899 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1900 * successfully downloaded the package; the driver does
1901 * not have to download the package and can continue loading
1904 * Note that if the caller is in an acquire lock, perform action, release lock
1905 * phase of operation, it is possible that the FW may detect a timeout and issue
1906 * a CORER. In this case, the driver will receive a CORER interrupt and will
1907 * have to determine its cause. The calling thread that is handling this flow
1908 * will likely get an error propagated back to it indicating the Download
1909 * Package, Update Package or the Release Resource AQ commands timed out.
1911 static enum ice_status
1912 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1913 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1914 struct ice_sq_cd *cd)
1916 struct ice_aqc_req_res *cmd_resp;
1917 struct ice_aq_desc desc;
1918 enum ice_status status;
1920 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1922 cmd_resp = &desc.params.res_owner;
1924 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1926 cmd_resp->res_id = CPU_TO_LE16(res);
1927 cmd_resp->access_type = CPU_TO_LE16(access);
1928 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1929 cmd_resp->timeout = CPU_TO_LE32(*timeout);
1932 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1934 /* The completion specifies the maximum time in ms that the driver
1935 * may hold the resource in the Timeout field.
1938 /* Global config lock response utilizes an additional status field.
1940 * If the Global config lock resource is held by some other driver, the
1941 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1942 * and the timeout field indicates the maximum time the current owner
1943 * of the resource has to free it.
1945 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1946 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1947 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1948 return ICE_SUCCESS;
1949 } else if (LE16_TO_CPU(cmd_resp->status) ==
1950 ICE_AQ_RES_GLBL_IN_PROG) {
1951 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1952 return ICE_ERR_AQ_ERROR;
1953 } else if (LE16_TO_CPU(cmd_resp->status) ==
1954 ICE_AQ_RES_GLBL_DONE) {
1955 return ICE_ERR_AQ_NO_WORK;
1958 /* invalid FW response, force a timeout immediately */
1959 *timeout = 0;
1960 return ICE_ERR_AQ_ERROR;
1963 /* If the resource is held by some other driver, the command completes
1964 * with a busy return value and the timeout field indicates the maximum
1965 * time the current owner of the resource has to free it.
1967 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1968 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1974 * ice_aq_release_res
1975 * @hw: pointer to the HW struct
1977 * @sdp_number: resource number
1978 * @cd: pointer to command details structure or NULL
1980 * release common resource using the admin queue commands (0x0009)
1982 static enum ice_status
1983 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1984 struct ice_sq_cd *cd)
1986 struct ice_aqc_req_res *cmd;
1987 struct ice_aq_desc desc;
1989 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1991 cmd = &desc.params.res_owner;
1993 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1995 cmd->res_id = CPU_TO_LE16(res);
1996 cmd->res_number = CPU_TO_LE32(sdp_number);
1998 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2003 * @hw: pointer to the HW structure
2005 * @access: access type (read or write)
2006 * @timeout: timeout in milliseconds
2008 * This function will attempt to acquire the ownership of a resource.
2011 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2012 enum ice_aq_res_access_type access, u32 timeout)
2014 #define ICE_RES_POLLING_DELAY_MS 10
2015 u32 delay = ICE_RES_POLLING_DELAY_MS;
2016 u32 time_left = timeout;
2017 enum ice_status status;
2019 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2021 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2023 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
2024 * previously acquired the resource and performed any necessary updates;
2025 * in this case the caller does not obtain the resource and has no
2026 * further work to do.
2028 if (status == ICE_ERR_AQ_NO_WORK)
2029 goto ice_acquire_res_exit;
2032 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
2034 /* If necessary, poll until the current lock owner times out */
2035 timeout = time_left;
2036 while (status && timeout && time_left) {
2037 ice_msec_delay(delay, true);
2038 timeout = (timeout > delay) ? timeout - delay : 0;
2039 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2041 if (status == ICE_ERR_AQ_NO_WORK)
2042 /* lock free, but no work to do */
2049 if (status && status != ICE_ERR_AQ_NO_WORK)
2050 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
2052 ice_acquire_res_exit:
2053 if (status == ICE_ERR_AQ_NO_WORK) {
2054 if (access == ICE_RES_WRITE)
2055 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
2056 else
2057 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
2064 * @hw: pointer to the HW structure
2067 * This function will release a resource using the proper Admin Command.
2069 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
2071 enum ice_status status;
2072 u32 total_delay = 0;
2074 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2076 status = ice_aq_release_res(hw, res, 0, NULL);
2078 /* there are some rare cases when trying to release the resource
2079 * results in an admin queue timeout, so handle them correctly
2081 while ((status == ICE_ERR_AQ_TIMEOUT) &&
2082 (total_delay < hw->adminq.sq_cmd_timeout)) {
2083 ice_msec_delay(1, true);
2084 status = ice_aq_release_res(hw, res, 0, NULL);
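/*
 * Editor's illustrative sketch (not part of the original source): the
 * acquire/act/release pattern around the Global Config Lock described in the
 * ice_aq_req_res() comment above. ICE_GLOBAL_CFG_LOCK_TIMEOUT is assumed to
 * be defined elsewhere in the driver; substitute a timeout in ms otherwise.
 */
static enum ice_status example_download_pkg_under_lock(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
	if (status == ICE_ERR_AQ_NO_WORK)
		return ICE_SUCCESS;	/* another driver already downloaded the package */
	if (status)
		return status;		/* lock not acquired; caller should fail to load */

	/* ... issue Download Package / Update Package AQ commands here ... */

	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
	return ICE_SUCCESS;
}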
2090 * ice_aq_alloc_free_res - command to allocate/free resources
2091 * @hw: pointer to the HW struct
2092 * @num_entries: number of resource entries in buffer
2093 * @buf: Indirect buffer to hold data parameters and response
2094 * @buf_size: size of buffer for indirect commands
2095 * @opc: pass in the command opcode
2096 * @cd: pointer to command details structure or NULL
2098 * Helper function to allocate/free resources using the admin queue commands
2101 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
2102 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
2103 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2105 struct ice_aqc_alloc_free_res_cmd *cmd;
2106 struct ice_aq_desc desc;
2108 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2110 cmd = &desc.params.sw_res_ctrl;
2112 if (!buf)
2113 return ICE_ERR_PARAM;
2115 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
2116 return ICE_ERR_PARAM;
2118 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2120 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2122 cmd->num_entries = CPU_TO_LE16(num_entries);
2124 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2128 * ice_alloc_hw_res - allocate resource
2129 * @hw: pointer to the HW struct
2130 * @type: type of resource
2131 * @num: number of resources to allocate
2132 * @btm: allocate from bottom
2133 * @res: pointer to array that will receive the resources
2136 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2138 struct ice_aqc_alloc_free_res_elem *buf;
2139 enum ice_status status;
2142 buf_len = ice_struct_size(buf, elem, num);
2143 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2145 return ICE_ERR_NO_MEMORY;
2147 /* Prepare buffer to allocate resource. */
2148 buf->num_elems = CPU_TO_LE16(num);
2149 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2150 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2152 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2154 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2155 ice_aqc_opc_alloc_res, NULL);
2157 goto ice_alloc_res_exit;
2159 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
2160 ICE_NONDMA_TO_NONDMA);
2168 * ice_free_hw_res - free allocated HW resource
2169 * @hw: pointer to the HW struct
2170 * @type: type of resource to free
2171 * @num: number of resources
2172 * @res: pointer to array that contains the resources to free
2174 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2176 struct ice_aqc_alloc_free_res_elem *buf;
2177 enum ice_status status;
2180 buf_len = ice_struct_size(buf, elem, num);
2181 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2183 return ICE_ERR_NO_MEMORY;
2185 /* Prepare buffer to free resource. */
2186 buf->num_elems = CPU_TO_LE16(num);
2187 buf->res_type = CPU_TO_LE16(type);
2188 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
2189 ICE_NONDMA_TO_NONDMA);
2191 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
2192 ice_aqc_opc_free_res, NULL);
2194 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
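/*
 * Editor's illustrative sketch (not part of the original source): allocating
 * one dedicated HW resource of a caller-supplied type from the bottom of the
 * range and then freeing it. Real callers pass one of the
 * ICE_AQC_RES_TYPE_* values for res_type.
 */
static enum ice_status example_alloc_then_free(struct ice_hw *hw, u16 res_type)
{
	enum ice_status status;
	u16 res_id;

	status = ice_alloc_hw_res(hw, res_type, 1, true, &res_id);
	if (status)
		return status;

	/* ... program the hardware using res_id ... */

	return ice_free_hw_res(hw, res_type, 1, &res_id);
}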
2201 * ice_get_num_per_func - determine number of resources per PF
2202 * @hw: pointer to the HW structure
2203 * @max: value to be split evenly among the active PFs
2205 * Determine the number of valid functions by going through the bitmap returned
2206 * from parsing capabilities and use this to calculate the number of resources
2207 * per PF based on the max value passed in.
2209 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2213 #define ICE_CAPS_VALID_FUNCS_M 0xFF
2214 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
2215 ICE_CAPS_VALID_FUNCS_M);
2217 if (!funcs)
2218 return 0;
2220 return max / funcs;
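/*
 * Editor's note (worked example for the helper above, illustrative numbers
 * only): if firmware reports valid_functions = 0x55, four bits are set in
 * the low byte, so four PFs are active; with max = 768 the helper returns
 * 768 / 4 = 192 resources per PF.
 */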
2224 * ice_parse_common_caps - parse common device/function capabilities
2225 * @hw: pointer to the HW struct
2226 * @caps: pointer to common capabilities structure
2227 * @elem: the capability element to parse
2228 * @prefix: message prefix for tracing capabilities
2230 * Given a capability element, extract relevant details into the common
2231 * capability structure.
2233 * Returns: true if the capability matches one of the common capability ids,
2237 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2238 struct ice_aqc_list_caps_elem *elem, const char *prefix)
2240 u32 logical_id = LE32_TO_CPU(elem->logical_id);
2241 u32 phys_id = LE32_TO_CPU(elem->phys_id);
2242 u32 number = LE32_TO_CPU(elem->number);
2243 u16 cap = LE16_TO_CPU(elem->cap);
2247 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2248 caps->valid_functions = number;
2249 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
2250 caps->valid_functions);
2252 case ICE_AQC_CAPS_DCB:
2253 caps->dcb = (number == 1);
2254 caps->active_tc_bitmap = logical_id;
2255 caps->maxtc = phys_id;
2256 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2257 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2258 caps->active_tc_bitmap);
2259 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2261 case ICE_AQC_CAPS_RSS:
2262 caps->rss_table_size = number;
2263 caps->rss_table_entry_width = logical_id;
2264 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2265 caps->rss_table_size);
2266 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2267 caps->rss_table_entry_width);
2269 case ICE_AQC_CAPS_RXQS:
2270 caps->num_rxq = number;
2271 caps->rxq_first_id = phys_id;
2272 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2274 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2275 caps->rxq_first_id);
2277 case ICE_AQC_CAPS_TXQS:
2278 caps->num_txq = number;
2279 caps->txq_first_id = phys_id;
2280 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2282 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2283 caps->txq_first_id);
2285 case ICE_AQC_CAPS_MSIX:
2286 caps->num_msix_vectors = number;
2287 caps->msix_vector_first_id = phys_id;
2288 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2289 caps->num_msix_vectors);
2290 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2291 caps->msix_vector_first_id);
2293 case ICE_AQC_CAPS_NVM_MGMT:
2294 caps->sec_rev_disabled =
2295 (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
2297 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2298 caps->sec_rev_disabled);
2299 caps->update_disabled =
2300 (number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
2302 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2303 caps->update_disabled);
2304 caps->nvm_unified_update =
2305 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2307 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2308 caps->nvm_unified_update);
2310 case ICE_AQC_CAPS_MAX_MTU:
2311 caps->max_mtu = number;
2312 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2313 prefix, caps->max_mtu);
2315 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2316 caps->pcie_reset_avoidance = (number > 0);
2317 ice_debug(hw, ICE_DBG_INIT,
2318 "%s: pcie_reset_avoidance = %d\n", prefix,
2319 caps->pcie_reset_avoidance);
2321 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2322 caps->reset_restrict_support = (number == 1);
2323 ice_debug(hw, ICE_DBG_INIT,
2324 "%s: reset_restrict_support = %d\n", prefix,
2325 caps->reset_restrict_support);
2327 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
2328 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
2329 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
2330 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
2332 u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
2334 caps->ext_topo_dev_img_ver_high[index] = number;
2335 caps->ext_topo_dev_img_ver_low[index] = logical_id;
2336 caps->ext_topo_dev_img_part_num[index] =
2337 (phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
2338 ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
2339 caps->ext_topo_dev_img_load_en[index] =
2340 (phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
2341 caps->ext_topo_dev_img_prog_en[index] =
2342 (phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
2343 ice_debug(hw, ICE_DBG_INIT,
2344 "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
2346 caps->ext_topo_dev_img_ver_high[index]);
2347 ice_debug(hw, ICE_DBG_INIT,
2348 "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
2350 caps->ext_topo_dev_img_ver_low[index]);
2351 ice_debug(hw, ICE_DBG_INIT,
2352 "%s: ext_topo_dev_img_part_num[%d] = %d\n",
2354 caps->ext_topo_dev_img_part_num[index]);
2355 ice_debug(hw, ICE_DBG_INIT,
2356 "%s: ext_topo_dev_img_load_en[%d] = %d\n",
2358 caps->ext_topo_dev_img_load_en[index]);
2359 ice_debug(hw, ICE_DBG_INIT,
2360 "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
2362 caps->ext_topo_dev_img_prog_en[index]);
2366 /* Not one of the recognized common capabilities */
2374 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2375 * @hw: pointer to the HW structure
2376 * @caps: pointer to capabilities structure to fix
2378 * Re-calculate the capabilities that are dependent on the number of physical
2379 * ports; i.e. some features are not supported or function differently on
2380 * devices with more than 4 ports.
2383 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2385 /* This assumes device capabilities are always scanned before function
2386 * capabilities during the initialization flow.
2388 if (hw->dev_caps.num_funcs > 4) {
2389 /* Max 4 TCs per port */
2391 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2397 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2398 * @hw: pointer to the HW struct
2399 * @func_p: pointer to function capabilities structure
2400 * @cap: pointer to the capability element to parse
2402 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2405 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2406 struct ice_aqc_list_caps_elem *cap)
2408 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2409 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2410 LE32_TO_CPU(cap->number));
2411 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2412 func_p->guar_num_vsi);
2416 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
2417 * @hw: pointer to the HW struct
2418 * @func_p: pointer to function capabilities structure
2419 * @cap: pointer to the capability element to parse
2421 * Extract function capabilities for ICE_AQC_CAPS_1588.
2424 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2425 struct ice_aqc_list_caps_elem *cap)
2427 struct ice_ts_func_info *info = &func_p->ts_func_info;
2428 u32 number = LE32_TO_CPU(cap->number);
2430 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2431 func_p->common_cap.ieee_1588 = info->ena;
2433 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2434 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2435 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2436 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2438 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
2439 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2441 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
2442 info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
2444 /* Unknown clock frequency, so assume a (probably incorrect)
2445 * default to avoid out-of-bounds lookups of frequency-related
2446 * information.
2448 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2450 info->time_ref = ICE_TIME_REF_FREQ_25_000;
2453 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2454 func_p->common_cap.ieee_1588);
2455 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2456 info->src_tmr_owned);
2457 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2459 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2460 info->tmr_index_owned);
2461 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2462 info->tmr_index_assoc);
2463 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2465 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2470 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2471 * @hw: pointer to the HW struct
2472 * @func_p: pointer to function capabilities structure
2474 * Extract function capabilities for ICE_AQC_CAPS_FD.
2477 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2481 if (hw->dcf_enabled)
2482 return;
2483 reg_val = rd32(hw, GLQF_FD_SIZE);
2484 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2485 GLQF_FD_SIZE_FD_GSIZE_S;
2486 func_p->fd_fltr_guar =
2487 ice_get_num_per_func(hw, val);
2488 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2489 GLQF_FD_SIZE_FD_BSIZE_S;
2490 func_p->fd_fltr_best_effort = val;
2492 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2493 func_p->fd_fltr_guar);
2494 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2495 func_p->fd_fltr_best_effort);
2499 * ice_parse_func_caps - Parse function capabilities
2500 * @hw: pointer to the HW struct
2501 * @func_p: pointer to function capabilities structure
2502 * @buf: buffer containing the function capability records
2503 * @cap_count: the number of capabilities
2505 * Helper function to parse function (0x000A) capabilities list. For
2506 * capabilities shared between device and function, this relies on
2507 * ice_parse_common_caps.
2509 * Loop through the list of provided capabilities and extract the relevant
2510 * data into the function capabilities structure.
2513 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2514 void *buf, u32 cap_count)
2516 struct ice_aqc_list_caps_elem *cap_resp;
2519 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2521 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2523 for (i = 0; i < cap_count; i++) {
2524 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2527 found = ice_parse_common_caps(hw, &func_p->common_cap,
2528 &cap_resp[i], "func caps");
2531 case ICE_AQC_CAPS_VSI:
2532 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2534 case ICE_AQC_CAPS_1588:
2535 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2537 case ICE_AQC_CAPS_FD:
2538 ice_parse_fdir_func_caps(hw, func_p);
2541 /* Don't list common capabilities as unknown */
2543 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2549 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2553 * ice_func_id_to_logical_id - map from function id to logical pf id
2554 * @active_function_bitmap: active function bitmap
2555 * @pf_id: function number of device
2557 static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id)
2562 for (i = 0; i < pf_id; i++)
2563 if (active_function_bitmap & BIT(i))
2564 logical_id++;
2566 return logical_id;
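/*
 * Editor's note (worked example for the helper above): with
 * active_function_bitmap = 0x0D (functions 0, 2 and 3 active) and pf_id = 3,
 * two of the bits below bit 3 are set, so the logical PF ID is 2.
 */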
2570 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2571 * @hw: pointer to the HW struct
2572 * @dev_p: pointer to device capabilities structure
2573 * @cap: capability element to parse
2575 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2578 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2579 struct ice_aqc_list_caps_elem *cap)
2581 u32 number = LE32_TO_CPU(cap->number);
2583 dev_p->num_funcs = ice_hweight32(number);
2584 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2587 hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id);
2591 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2592 * @hw: pointer to the HW struct
2593 * @dev_p: pointer to device capabilities structure
2594 * @cap: capability element to parse
2596 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2599 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2600 struct ice_aqc_list_caps_elem *cap)
2602 u32 number = LE32_TO_CPU(cap->number);
2604 dev_p->num_vsi_allocd_to_host = number;
2605 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2606 dev_p->num_vsi_allocd_to_host);
2610 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
2611 * @hw: pointer to the HW struct
2612 * @dev_p: pointer to device capabilities structure
2613 * @cap: capability element to parse
2615 * Parse ICE_AQC_CAPS_1588 for device capabilities.
2618 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2619 struct ice_aqc_list_caps_elem *cap)
2621 struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2622 u32 logical_id = LE32_TO_CPU(cap->logical_id);
2623 u32 phys_id = LE32_TO_CPU(cap->phys_id);
2624 u32 number = LE32_TO_CPU(cap->number);
2626 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2627 dev_p->common_cap.ieee_1588 = info->ena;
2629 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2630 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2631 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2633 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2634 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2635 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2637 info->ena_ports = logical_id;
2638 info->tmr_own_map = phys_id;
2640 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2641 dev_p->common_cap.ieee_1588);
2642 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2644 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2646 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2648 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2650 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2652 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2654 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2656 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2661 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2662 * @hw: pointer to the HW struct
2663 * @dev_p: pointer to device capabilities structure
2664 * @cap: capability element to parse
2666 * Parse ICE_AQC_CAPS_FD for device capabilities.
2669 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2670 struct ice_aqc_list_caps_elem *cap)
2672 u32 number = LE32_TO_CPU(cap->number);
2674 dev_p->num_flow_director_fltr = number;
2675 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2676 dev_p->num_flow_director_fltr);
2680 * ice_parse_dev_caps - Parse device capabilities
2681 * @hw: pointer to the HW struct
2682 * @dev_p: pointer to device capabilities structure
2683 * @buf: buffer containing the device capability records
2684 * @cap_count: the number of capabilities
2686 * Helper function to parse the device (0x000B) capabilities list. For
2687 * capabilities shared between device and function, this relies on
2688 * ice_parse_common_caps.
2690 * Loop through the list of provided capabilities and extract the relevant
2691 * data into the device capabilities structure.
2694 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2695 void *buf, u32 cap_count)
2697 struct ice_aqc_list_caps_elem *cap_resp;
2700 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2702 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2704 for (i = 0; i < cap_count; i++) {
2705 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2708 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2709 &cap_resp[i], "dev caps");
2712 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2713 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2715 case ICE_AQC_CAPS_VSI:
2716 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2718 case ICE_AQC_CAPS_1588:
2719 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2721 case ICE_AQC_CAPS_FD:
2722 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2725 /* Don't list common capabilities as unknown */
2727 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2733 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2737 * ice_aq_list_caps - query function/device capabilities
2738 * @hw: pointer to the HW struct
2739 * @buf: a buffer to hold the capabilities
2740 * @buf_size: size of the buffer
2741 * @cap_count: if not NULL, set to the number of capabilities reported
2742 * @opc: capabilities type to discover, device or function
2743 * @cd: pointer to command details structure or NULL
2745 * Get the function (0x000A) or device (0x000B) capabilities description from
2746 * firmware and store it in the buffer.
2748 * If the cap_count pointer is not NULL, then it is set to the number of
2749 * capabilities firmware will report. Note that if the buffer size is too
2750 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2751 * cap_count will still be updated in this case. It is recommended that the
2752 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2753 * firmware could return) to avoid this.
2755 static enum ice_status
2756 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2757 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2759 struct ice_aqc_list_caps *cmd;
2760 struct ice_aq_desc desc;
2761 enum ice_status status;
2763 cmd = &desc.params.get_cap;
2765 if (opc != ice_aqc_opc_list_func_caps &&
2766 opc != ice_aqc_opc_list_dev_caps)
2767 return ICE_ERR_PARAM;
2769 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2770 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2772 if (cap_count)
2773 *cap_count = LE32_TO_CPU(cmd->count);
2779 * ice_discover_dev_caps - Read and extract device capabilities
2780 * @hw: pointer to the hardware structure
2781 * @dev_caps: pointer to device capabilities structure
2783 * Read the device capabilities and extract them into the dev_caps structure
2786 static enum ice_status
2787 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2789 enum ice_status status;
2793 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2795 return ICE_ERR_NO_MEMORY;
2797 /* Although the driver doesn't know the number of capabilities the
2798 * device will return, we can simply send a 4KB buffer, the maximum
2799 * possible size that firmware can return.
2801 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2803 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2804 ice_aqc_opc_list_dev_caps, NULL);
2805 if (!status)
2806 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2813 * ice_discover_func_caps - Read and extract function capabilities
2814 * @hw: pointer to the hardware structure
2815 * @func_caps: pointer to function capabilities structure
2817 * Read the function capabilities and extract them into the func_caps structure
2820 static enum ice_status
2821 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2823 enum ice_status status;
2827 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2829 return ICE_ERR_NO_MEMORY;
2831 /* Although the driver doesn't know the number of capabilities the
2832 * device will return, we can simply send a 4KB buffer, the maximum
2833 * possible size that firmware can return.
2835 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2837 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2838 ice_aqc_opc_list_func_caps, NULL);
2839 if (!status)
2840 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2847 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2848 * @hw: pointer to the hardware structure
2850 void ice_set_safe_mode_caps(struct ice_hw *hw)
2852 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2853 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2854 struct ice_hw_common_caps cached_caps;
2857 /* cache some func_caps values that should be restored after memset */
2858 cached_caps = func_caps->common_cap;
2860 /* unset func capabilities */
2861 memset(func_caps, 0, sizeof(*func_caps));
2863 #define ICE_RESTORE_FUNC_CAP(name) \
2864 func_caps->common_cap.name = cached_caps.name
2866 /* restore cached values */
2867 ICE_RESTORE_FUNC_CAP(valid_functions);
2868 ICE_RESTORE_FUNC_CAP(txq_first_id);
2869 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2870 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2871 ICE_RESTORE_FUNC_CAP(max_mtu);
2872 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2874 /* one Tx and one Rx queue in safe mode */
2875 func_caps->common_cap.num_rxq = 1;
2876 func_caps->common_cap.num_txq = 1;
2878 /* two MSIX vectors, one for traffic and one for misc causes */
2879 func_caps->common_cap.num_msix_vectors = 2;
2880 func_caps->guar_num_vsi = 1;
2882 /* cache some dev_caps values that should be restored after memset */
2883 cached_caps = dev_caps->common_cap;
2884 num_funcs = dev_caps->num_funcs;
2886 /* unset dev capabilities */
2887 memset(dev_caps, 0, sizeof(*dev_caps));
2889 #define ICE_RESTORE_DEV_CAP(name) \
2890 dev_caps->common_cap.name = cached_caps.name
2892 /* restore cached values */
2893 ICE_RESTORE_DEV_CAP(valid_functions);
2894 ICE_RESTORE_DEV_CAP(txq_first_id);
2895 ICE_RESTORE_DEV_CAP(rxq_first_id);
2896 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2897 ICE_RESTORE_DEV_CAP(max_mtu);
2898 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2899 dev_caps->num_funcs = num_funcs;
2901 /* one Tx and one Rx queue per function in safe mode */
2902 dev_caps->common_cap.num_rxq = num_funcs;
2903 dev_caps->common_cap.num_txq = num_funcs;
2905 /* two MSIX vectors per function */
2906 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2910 * ice_get_caps - get info about the HW
2911 * @hw: pointer to the hardware structure
2913 enum ice_status ice_get_caps(struct ice_hw *hw)
2915 enum ice_status status;
2917 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2921 return ice_discover_func_caps(hw, &hw->func_caps);
2925 * ice_aq_manage_mac_write - manage MAC address write command
2926 * @hw: pointer to the HW struct
2927 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2928 * @flags: flags to control write behavior
2929 * @cd: pointer to command details structure or NULL
2931 * This function is used to write MAC address to the NVM (0x0108).
2934 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2935 struct ice_sq_cd *cd)
2937 struct ice_aqc_manage_mac_write *cmd;
2938 struct ice_aq_desc desc;
2940 cmd = &desc.params.mac_write;
2941 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2944 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
2946 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2950 * ice_aq_clear_pxe_mode
2951 * @hw: pointer to the HW struct
2953 * Tell the firmware that the driver is taking over from PXE (0x0110).
2955 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2957 struct ice_aq_desc desc;
2959 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2960 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2962 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2966 * ice_clear_pxe_mode - clear pxe operations mode
2967 * @hw: pointer to the HW struct
2969 * Make sure all PXE mode settings are cleared, including things
2970 * like descriptor fetch/write-back mode.
2972 void ice_clear_pxe_mode(struct ice_hw *hw)
2974 if (ice_check_sq_alive(hw, &hw->adminq))
2975 ice_aq_clear_pxe_mode(hw);
2979 * ice_aq_set_port_params - set physical port parameters.
2980 * @pi: pointer to the port info struct
2981 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2982 * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
2983 * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
2984 * @double_vlan: if set double VLAN is enabled
2985 * @cd: pointer to command details structure or NULL
2987 * Set Physical port parameters (0x0203)
2990 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2991 bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2992 struct ice_sq_cd *cd)
2995 struct ice_aqc_set_port_params *cmd;
2996 struct ice_hw *hw = pi->hw;
2997 struct ice_aq_desc desc;
3000 cmd = &desc.params.set_port_params;
3002 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
3003 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
3005 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
3007 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
3009 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
3010 cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
3012 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3016 * ice_is_100m_speed_supported
3017 * @hw: pointer to the HW struct
3019 * returns true if 100M speeds are supported by the device, false otherwise.
3022 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3024 switch (hw->device_id) {
3025 case ICE_DEV_ID_E822C_10G_BASE_T:
3026 case ICE_DEV_ID_E822C_SGMII:
3027 case ICE_DEV_ID_E822L_10G_BASE_T:
3028 case ICE_DEV_ID_E822L_SGMII:
3029 case ICE_DEV_ID_E823L_10G_BASE_T:
3030 case ICE_DEV_ID_E823L_1GBE:
3038 * ice_get_link_speed_based_on_phy_type - returns link speed
3039 * @phy_type_low: lower part of phy_type
3040 * @phy_type_high: higher part of phy_type
3042 * This helper function will convert an entry in PHY type structure
3043 * [phy_type_low, phy_type_high] to its corresponding link speed.
3044 * Note: In the structure of [phy_type_low, phy_type_high], there should
3045 * be one bit set, as this function will convert one PHY type to its corresponding speed.
3047 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
3048 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
3051 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3053 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3054 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3056 switch (phy_type_low) {
3057 case ICE_PHY_TYPE_LOW_100BASE_TX:
3058 case ICE_PHY_TYPE_LOW_100M_SGMII:
3059 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3061 case ICE_PHY_TYPE_LOW_1000BASE_T:
3062 case ICE_PHY_TYPE_LOW_1000BASE_SX:
3063 case ICE_PHY_TYPE_LOW_1000BASE_LX:
3064 case ICE_PHY_TYPE_LOW_1000BASE_KX:
3065 case ICE_PHY_TYPE_LOW_1G_SGMII:
3066 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3068 case ICE_PHY_TYPE_LOW_2500BASE_T:
3069 case ICE_PHY_TYPE_LOW_2500BASE_X:
3070 case ICE_PHY_TYPE_LOW_2500BASE_KX:
3071 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3073 case ICE_PHY_TYPE_LOW_5GBASE_T:
3074 case ICE_PHY_TYPE_LOW_5GBASE_KR:
3075 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3077 case ICE_PHY_TYPE_LOW_10GBASE_T:
3078 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3079 case ICE_PHY_TYPE_LOW_10GBASE_SR:
3080 case ICE_PHY_TYPE_LOW_10GBASE_LR:
3081 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3082 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3083 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3084 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3086 case ICE_PHY_TYPE_LOW_25GBASE_T:
3087 case ICE_PHY_TYPE_LOW_25GBASE_CR:
3088 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3089 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3090 case ICE_PHY_TYPE_LOW_25GBASE_SR:
3091 case ICE_PHY_TYPE_LOW_25GBASE_LR:
3092 case ICE_PHY_TYPE_LOW_25GBASE_KR:
3093 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3094 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3095 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3096 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3097 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3099 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3100 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3101 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3102 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3103 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3104 case ICE_PHY_TYPE_LOW_40G_XLAUI:
3105 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3107 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3108 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3109 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3110 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3111 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3112 case ICE_PHY_TYPE_LOW_50G_LAUI2:
3113 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3114 case ICE_PHY_TYPE_LOW_50G_AUI2:
3115 case ICE_PHY_TYPE_LOW_50GBASE_CP:
3116 case ICE_PHY_TYPE_LOW_50GBASE_SR:
3117 case ICE_PHY_TYPE_LOW_50GBASE_FR:
3118 case ICE_PHY_TYPE_LOW_50GBASE_LR:
3119 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3120 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3121 case ICE_PHY_TYPE_LOW_50G_AUI1:
3122 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3124 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3125 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3126 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3127 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3128 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3129 case ICE_PHY_TYPE_LOW_100G_CAUI4:
3130 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3131 case ICE_PHY_TYPE_LOW_100G_AUI4:
3132 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3133 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3134 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3135 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3136 case ICE_PHY_TYPE_LOW_100GBASE_DR:
3137 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3140 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3144 switch (phy_type_high) {
3145 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3146 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3147 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3148 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3149 case ICE_PHY_TYPE_HIGH_100G_AUI2:
3150 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3153 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3157 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3158 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3159 return ICE_AQ_LINK_SPEED_UNKNOWN;
3160 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3161 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3162 return ICE_AQ_LINK_SPEED_UNKNOWN;
3163 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3164 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3165 return speed_phy_type_low;
3167 return speed_phy_type_high;
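/*
 * Editor's illustrative sketch (not part of the original source): mapping a
 * single PHY type bit to its link speed with the helper above. If more than
 * one bit is set across the low/high words, the helper deliberately returns
 * ICE_AQ_LINK_SPEED_UNKNOWN.
 */
static void example_phy_type_to_speed(struct ice_hw *hw)
{
	u16 speed;

	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR, 0);
	ice_debug(hw, ICE_DBG_LINK, "25GBASE-SR speed bit = 0x%x\n", speed);

	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_10GBASE_T |
						     ICE_PHY_TYPE_LOW_25GBASE_SR, 0);
	ice_debug(hw, ICE_DBG_LINK, "two bits set -> 0x%x (unknown)\n", speed);
}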
3171 * ice_update_phy_type
3172 * @phy_type_low: pointer to the lower part of phy_type
3173 * @phy_type_high: pointer to the higher part of phy_type
3174 * @link_speeds_bitmap: targeted link speeds bitmap
3176 * Note: For the layout of link_speeds_bitmap, see
3177 * [ice_aqc_get_link_status->link_speed]. The caller may pass in a
3178 * link_speeds_bitmap that includes multiple speeds.
3180 * Each entry in the [phy_type_low, phy_type_high] structure
3181 * represents a certain link speed. This helper function turns on bits
3182 * in [phy_type_low, phy_type_high] based on the value of the
3183 * link_speeds_bitmap input parameter.
3186 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3187 u16 link_speeds_bitmap)
3194 /* We first check with low part of phy_type */
3195 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3196 pt_low = BIT_ULL(index);
3197 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3199 if (link_speeds_bitmap & speed)
3200 *phy_type_low |= BIT_ULL(index);
3203 /* We then check with high part of phy_type */
3204 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3205 pt_high = BIT_ULL(index);
3206 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3208 if (link_speeds_bitmap & speed)
3209 *phy_type_high |= BIT_ULL(index);
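/*
 * Editor's illustrative sketch (not part of the original source): expanding a
 * link speed bitmap into PHY type masks with ice_update_phy_type(). Every
 * phy_type_low/high bit whose speed matches 10G or 25G gets turned on.
 */
static void example_speeds_to_phy_type(u64 *phy_type_low, u64 *phy_type_high)
{
	u16 speeds = ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB;

	*phy_type_low = 0;
	*phy_type_high = 0;
	ice_update_phy_type(phy_type_low, phy_type_high, speeds);
}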
3214 * ice_aq_set_phy_cfg
3215 * @hw: pointer to the HW struct
3216 * @pi: port info structure of the interested logical port
3217 * @cfg: structure with PHY configuration data to be set
3218 * @cd: pointer to command details structure or NULL
3220 * Set the various PHY configuration parameters supported on the Port.
3221 * One or more of the Set PHY config parameters may be ignored in an MFP
3222 * mode as the PF may not have the privilege to set some of the PHY Config
3223 * parameters. This status will be indicated by the command response (0x0601).
3226 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3227 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3229 struct ice_aq_desc desc;
3230 enum ice_status status;
3233 return ICE_ERR_PARAM;
3235 /* Ensure that only valid bits of cfg->caps can be turned on. */
3236 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3237 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3240 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3243 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3244 desc.params.set_phy.lport_num = pi->lport;
3245 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3247 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3248 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
3249 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
3250 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
3251 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
3252 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
3253 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
3254 cfg->low_power_ctrl_an);
3255 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
3256 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
3257 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
3260 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3262 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3263 status = ICE_SUCCESS;
3265 if (!status)
3266 pi->phy.curr_user_phy_cfg = *cfg;
3272 * ice_update_link_info - update status of the HW network link
3273 * @pi: port info structure of the interested logical port
3275 enum ice_status ice_update_link_info(struct ice_port_info *pi)
3277 struct ice_link_status *li;
3278 enum ice_status status;
3281 return ICE_ERR_PARAM;
3283 li = &pi->phy.link_info;
3285 status = ice_aq_get_link_info(pi, true, NULL, NULL);
3289 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3290 struct ice_aqc_get_phy_caps_data *pcaps;
3294 pcaps = (struct ice_aqc_get_phy_caps_data *)
3295 ice_malloc(hw, sizeof(*pcaps));
3297 return ICE_ERR_NO_MEMORY;
3299 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3302 if (status == ICE_SUCCESS)
3303 ice_memcpy(li->module_type, &pcaps->module_type,
3304 sizeof(li->module_type),
3305 ICE_NONDMA_TO_NONDMA);
3307 ice_free(hw, pcaps);
3314 * ice_cache_phy_user_req
3315 * @pi: port information structure
3316 * @cache_data: PHY logging data
3317 * @cache_mode: PHY logging mode
3319 * Log the user request on (FC, FEC, SPEED) for later use.
3322 ice_cache_phy_user_req(struct ice_port_info *pi,
3323 struct ice_phy_cache_mode_data cache_data,
3324 enum ice_phy_cache_mode cache_mode)
3329 switch (cache_mode) {
3330 case ICE_FC_MODE:
3331 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3333 case ICE_SPEED_MODE:
3334 pi->phy.curr_user_speed_req =
3335 cache_data.data.curr_user_speed_req;
3337 case ICE_FEC_MODE:
3338 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3346 * ice_caps_to_fc_mode
3347 * @caps: PHY capabilities
3349 * Convert PHY FC capabilities to ice FC mode
3351 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3353 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3354 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3357 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3358 return ICE_FC_TX_PAUSE;
3360 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3361 return ICE_FC_RX_PAUSE;
3367 * ice_caps_to_fec_mode
3368 * @caps: PHY capabilities
3369 * @fec_options: Link FEC options
3371 * Convert PHY FEC capabilities to ice FEC mode
3373 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3375 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3376 return ICE_FEC_AUTO;
3378 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3379 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3380 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3381 ICE_AQC_PHY_FEC_25G_KR_REQ))
3382 return ICE_FEC_BASER;
3384 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3385 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3386 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3389 return ICE_FEC_NONE;
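/*
 * Editor's illustrative sketch (not part of the original source): decoding
 * flow control and FEC modes from a Get PHY Capabilities response with the
 * two helpers above.
 */
static void
example_decode_fc_fec(struct ice_hw *hw, struct ice_aqc_get_phy_caps_data *pcaps)
{
	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
						     pcaps->link_fec_options);

	ice_debug(hw, ICE_DBG_LINK, "fc mode = %d, fec mode = %d\n", fc, fec);
}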
3393 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3394 * @pi: port information structure
3395 * @cfg: PHY configuration data to set FC mode
3396 * @req_mode: FC mode to configure
3398 static enum ice_status
3399 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3400 enum ice_fc_mode req_mode)
3402 struct ice_phy_cache_mode_data cache_data;
3403 u8 pause_mask = 0x0;
3406 return ICE_ERR_BAD_PTR;
3411 struct ice_aqc_get_phy_caps_data *pcaps;
3412 enum ice_status status;
3414 pcaps = (struct ice_aqc_get_phy_caps_data *)
3415 ice_malloc(pi->hw, sizeof(*pcaps));
3417 return ICE_ERR_NO_MEMORY;
3419 /* Query the value of FC that both the NIC and attached media
3422 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3425 ice_free(pi->hw, pcaps);
3429 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3430 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3432 ice_free(pi->hw, pcaps);
3435 case ICE_FC_FULL:
3436 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3437 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3439 case ICE_FC_RX_PAUSE:
3440 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3442 case ICE_FC_TX_PAUSE:
3443 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3449 /* clear the old pause settings */
3450 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3451 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3453 /* set the new capabilities */
3454 cfg->caps |= pause_mask;
3456 /* Cache user FC request */
3457 cache_data.data.curr_user_fc_req = req_mode;
3458 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3465 * @pi: port information structure
3466 * @aq_failures: pointer to status code, specific to ice_set_fc routine
3467 * @ena_auto_link_update: enable automatic link update
3469 * Set the requested flow control mode.
3472 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3474 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3475 struct ice_aqc_get_phy_caps_data *pcaps;
3476 enum ice_status status;
3479 if (!pi || !aq_failures)
3480 return ICE_ERR_BAD_PTR;
3485 pcaps = (struct ice_aqc_get_phy_caps_data *)
3486 ice_malloc(hw, sizeof(*pcaps));
3488 return ICE_ERR_NO_MEMORY;
3490 /* Get the current PHY config */
3491 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3495 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3499 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3501 /* Configure the set PHY data */
3502 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3504 if (status != ICE_ERR_BAD_PTR)
3505 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3510 /* If the capabilities have changed, then set the new config */
3511 if (cfg.caps != pcaps->caps) {
3512 int retry_count, retry_max = 10;
3514 /* Auto restart link so settings take effect */
3515 if (ena_auto_link_update)
3516 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3518 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3520 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3524 /* Update the link info
3525 * It sometimes takes a really long time for link to
3526 * come back from the atomic reset. Thus, we wait a
3529 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3530 status = ice_update_link_info(pi);
3532 if (status == ICE_SUCCESS)
3535 ice_msec_delay(100, true);
3539 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3543 ice_free(hw, pcaps);
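/*
 * Editor's illustrative sketch (not part of the original source): requesting
 * full (Rx plus Tx pause) flow control with automatic link restart via
 * ice_set_fc(). The handling of aq_failures here is only a debug print.
 */
static enum ice_status example_enable_full_fc(struct ice_port_info *pi)
{
	enum ice_status status;
	u8 aq_failures = 0;

	pi->fc.req_mode = ICE_FC_FULL;
	status = ice_set_fc(pi, &aq_failures, true);
	if (status)
		ice_debug(pi->hw, ICE_DBG_LINK, "set fc failed, aq_failures = %u\n",
			  aq_failures);

	return status;
}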
3548 * ice_phy_caps_equals_cfg
3549 * @phy_caps: PHY capabilities
3550 * @phy_cfg: PHY configuration
3552 * Helper function to determine if the PHY capabilities match the PHY configuration.
3556 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3557 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3559 u8 caps_mask, cfg_mask;
3561 if (!phy_caps || !phy_cfg)
3564 /* These bits are not common between capabilities and configuration.
3565 * Do not use them to determine equality.
3567 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3568 ICE_AQC_PHY_EN_MOD_QUAL);
3569 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3571 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3572 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3573 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3574 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3575 phy_caps->eee_cap != phy_cfg->eee_cap ||
3576 phy_caps->eeer_value != phy_cfg->eeer_value ||
3577 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3584 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3585 * @pi: port information structure
3586 * @caps: PHY ability structure to copy data from
3587 * @cfg: PHY configuration structure to copy data to
3589 * Helper function to copy AQC PHY get ability data to the PHY set configuration data structure.
3593 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3594 struct ice_aqc_get_phy_caps_data *caps,
3595 struct ice_aqc_set_phy_cfg_data *cfg)
3597 if (!pi || !caps || !cfg)
3600 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3601 cfg->phy_type_low = caps->phy_type_low;
3602 cfg->phy_type_high = caps->phy_type_high;
3603 cfg->caps = caps->caps;
3604 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3605 cfg->eee_cap = caps->eee_cap;
3606 cfg->eeer_value = caps->eeer_value;
3607 cfg->link_fec_opt = caps->link_fec_options;
3608 cfg->module_compliance_enforcement =
3609 caps->module_compliance_enforcement;
3613 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3614 * @pi: port information structure
3615 * @cfg: PHY configuration data to set FEC mode
3616 * @fec: FEC mode to configure
3619 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3620 enum ice_fec_mode fec)
3622 struct ice_aqc_get_phy_caps_data *pcaps;
3623 enum ice_status status = ICE_SUCCESS;
3627 return ICE_ERR_BAD_PTR;
3631 pcaps = (struct ice_aqc_get_phy_caps_data *)
3632 ice_malloc(hw, sizeof(*pcaps));
3634 return ICE_ERR_NO_MEMORY;
3636 status = ice_aq_get_phy_caps(pi, false,
3637 (ice_fw_supports_report_dflt_cfg(hw) ?
3638 ICE_AQC_REPORT_DFLT_CFG :
3639 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3644 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3645 cfg->link_fec_opt = pcaps->link_fec_options;
3647 switch (fec) {
3648 case ICE_FEC_BASER:
3649 /* Clear RS bits, and AND BASE-R ability
3650 * bits and OR request bits.
3652 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3653 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3654 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3655 ICE_AQC_PHY_FEC_25G_KR_REQ;
3657 case ICE_FEC_RS:
3658 /* Clear BASE-R bits, and AND RS ability
3659 * bits and OR request bits.
3661 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3662 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3663 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3665 case ICE_FEC_NONE:
3666 /* Clear all FEC option bits. */
3667 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3669 case ICE_FEC_AUTO:
3670 /* AND auto FEC bit, and all caps bits. */
3671 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3672 cfg->link_fec_opt |= pcaps->link_fec_options;
3674 default:
3675 status = ICE_ERR_PARAM;
3679 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
3680 !ice_fw_supports_report_dflt_cfg(pi->hw)) {
3681 struct ice_link_default_override_tlv tlv;
3683 if (ice_get_link_default_override(&tlv, pi))
3686 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3687 (tlv.options & ICE_LINK_OVERRIDE_EN))
3688 cfg->link_fec_opt = tlv.fec_options;
3692 ice_free(hw, pcaps);
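/*
 * Editor's illustrative sketch (not part of the original source): the usual
 * flow for changing the FEC mode -- read the active PHY configuration, copy
 * it into a set-config structure, let ice_cfg_phy_fec() adjust the FEC bits,
 * then apply the result with ice_aq_set_phy_cfg().
 */
static enum ice_status
example_set_fec(struct ice_port_info *pi, enum ice_fec_mode fec)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(pi->hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (!status) {
		ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
		status = ice_cfg_phy_fec(pi, &cfg, fec);
	}
	if (!status)
		status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);

	ice_free(pi->hw, pcaps);
	return status;
}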
3698 * ice_get_link_status - get status of the HW network link
3699 * @pi: port information structure
3700 * @link_up: pointer to bool (true/false = linkup/linkdown)
3702 * Variable link_up is true if link is up, false if link is down.
3703 * The variable link_up is invalid if status is non-zero. As a
3704 * result of this call, link status reporting becomes enabled.
3706 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3708 struct ice_phy_info *phy_info;
3709 enum ice_status status = ICE_SUCCESS;
3711 if (!pi || !link_up)
3712 return ICE_ERR_PARAM;
3714 phy_info = &pi->phy;
3716 if (phy_info->get_link_info) {
3717 status = ice_update_link_info(pi);
3720 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3724 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
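/*
 * Editor's illustrative sketch (not part of the original source): polling
 * ice_get_link_status() until link comes up or a caller-chosen number of
 * 100 ms attempts is exhausted. The retry count and the error code returned
 * on timeout are arbitrary choices.
 */
static enum ice_status example_wait_for_link(struct ice_port_info *pi)
{
	enum ice_status status;
	bool link_up = false;
	int i;

	for (i = 0; i < 50 && !link_up; i++) {
		status = ice_get_link_status(pi, &link_up);
		if (status)
			return status;
		if (!link_up)
			ice_msec_delay(100, true);
	}

	return link_up ? ICE_SUCCESS : ICE_ERR_NOT_READY;
}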
3730 * ice_aq_set_link_restart_an
3731 * @pi: pointer to the port information structure
3732 * @ena_link: if true: enable link, if false: disable link
3733 * @cd: pointer to command details structure or NULL
3735 * Sets up the link and restarts the Auto-Negotiation over the link.
3738 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3739 struct ice_sq_cd *cd)
3741 struct ice_aqc_restart_an *cmd;
3742 struct ice_aq_desc desc;
3744 cmd = &desc.params.restart_an;
3746 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3748 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3749 cmd->lport_num = pi->lport;
3751 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3753 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3755 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3759 * ice_aq_set_event_mask
3760 * @hw: pointer to the HW struct
3761 * @port_num: port number of the physical function
3762 * @mask: event mask to be set
3763 * @cd: pointer to command details structure or NULL
3765 * Set event mask (0x0613)
3768 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3769 struct ice_sq_cd *cd)
3771 struct ice_aqc_set_event_mask *cmd;
3772 struct ice_aq_desc desc;
3774 cmd = &desc.params.set_event_mask;
3776 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3778 cmd->lport_num = port_num;
3780 cmd->event_mask = CPU_TO_LE16(mask);
3781 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3785 * ice_aq_set_mac_loopback
3786 * @hw: pointer to the HW struct
3787 * @ena_lpbk: Enable or Disable loopback
3788 * @cd: pointer to command details structure or NULL
3790 * Enable/disable loopback on a given port
3793 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3795 struct ice_aqc_set_mac_lb *cmd;
3796 struct ice_aq_desc desc;
3798 cmd = &desc.params.set_mac_lb;
3800 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3802 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3804 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3808 * ice_aq_set_port_id_led
3809 * @pi: pointer to the port information
3810 * @is_orig_mode: is this LED set to original mode (by the net-list)
3811 * @cd: pointer to command details structure or NULL
3813 * Set LED value for the given port (0x06e9)
3816 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3817 struct ice_sq_cd *cd)
3819 struct ice_aqc_set_port_id_led *cmd;
3820 struct ice_hw *hw = pi->hw;
3821 struct ice_aq_desc desc;
3823 cmd = &desc.params.set_port_id_led;
3825 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3828 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3830 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3832 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3837 * @hw: pointer to the HW struct
3838 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3839 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3840 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
3842 * @set_page: set or ignore the page
3843 * @data: pointer to data buffer to be read/written to the I2C device.
3844 * @length: 1-16 for read, 1 for write.
3845 * @write: 0 for read, 1 for write.
3846 * @cd: pointer to command details structure or NULL
3848 * Read/Write SFF EEPROM (0x06EE)
3851 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3852 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3853 bool write, struct ice_sq_cd *cd)
3855 struct ice_aqc_sff_eeprom *cmd;
3856 struct ice_aq_desc desc;
3857 enum ice_status status;
3859 if (!data || (mem_addr & 0xff00))
3860 return ICE_ERR_PARAM;
3862 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3863 cmd = &desc.params.read_write_sff_param;
3864 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3865 cmd->lport_num = (u8)(lport & 0xff);
3866 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3867 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3868 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3870 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3871 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3872 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3873 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3875 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3877 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
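/*
 * Editor's illustrative sketch (not part of the original source): reading the
 * first bytes of a module's SFF EEPROM (identifier area) over the default
 * I2C address 0xA0, page 0, using the port's topology-default logical port
 * (lport = 0 with the valid bit clear).
 */
static enum ice_status
example_read_module_id(struct ice_hw *hw, u8 *buf, u8 len)
{
	return ice_aq_sff_eeprom(hw, 0, 0xA0, 0x0000, 0, 0, buf, len,
				 false, NULL);
}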
3882 * ice_aq_prog_topo_dev_nvm
3883 * @hw: pointer to the hardware structure
3884 * @topo_params: pointer to structure storing topology parameters for a device
3885 * @cd: pointer to command details structure or NULL
3887 * Program Topology Device NVM (0x06F2)
3891 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
3892 struct ice_aqc_link_topo_params *topo_params,
3893 struct ice_sq_cd *cd)
3895 struct ice_aqc_prog_topo_dev_nvm *cmd;
3896 struct ice_aq_desc desc;
3898 cmd = &desc.params.prog_topo_dev_nvm;
3900 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);
3902 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3903 ICE_NONDMA_TO_NONDMA);
3905 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3909 * ice_aq_read_topo_dev_nvm
3910 * @hw: pointer to the hardware structure
3911 * @topo_params: pointer to structure storing topology parameters for a device
3912 * @start_address: byte offset in the topology device NVM
3913 * @data: pointer to data buffer
3914 * @data_size: number of bytes to be read from the topology device NVM
3915 * @cd: pointer to command details structure or NULL
3916 * Read Topology Device NVM (0x06F3)
3920 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
3921 struct ice_aqc_link_topo_params *topo_params,
3922 u32 start_address, u8 *data, u8 data_size,
3923 struct ice_sq_cd *cd)
3925 struct ice_aqc_read_topo_dev_nvm *cmd;
3926 struct ice_aq_desc desc;
3927 enum ice_status status;
3929 if (!data || data_size == 0 ||
3930 data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
3931 return ICE_ERR_PARAM;
3933 cmd = &desc.params.read_topo_dev_nvm;
3935 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
3937 desc.datalen = CPU_TO_LE16(data_size);
3938 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3939 ICE_NONDMA_TO_NONDMA);
3940 cmd->start_address = CPU_TO_LE32(start_address);
3942 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3946 ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
3952 * __ice_aq_get_set_rss_lut
3953 * @hw: pointer to the hardware structure
3954 * @params: RSS LUT parameters
3955 * @set: set true to set the table, false to get the table
3957 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3959 static enum ice_status
3960 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3962 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3963 struct ice_aqc_get_set_rss_lut *cmd_resp;
3964 struct ice_aq_desc desc;
3965 enum ice_status status;
3969 return ICE_ERR_PARAM;
3971 vsi_handle = params->vsi_handle;
3974 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3975 return ICE_ERR_PARAM;
3977 lut_size = params->lut_size;
3978 lut_type = params->lut_type;
3979 glob_lut_idx = params->global_lut_id;
3980 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3982 cmd_resp = &desc.params.get_set_rss_lut;
3985 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3986 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3988 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3991 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3992 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3993 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3994 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3997 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3998 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3999 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
4000 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
4001 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
4004 status = ICE_ERR_PARAM;
4005 goto ice_aq_get_set_rss_lut_exit;
4008 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
4009 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
4010 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
4013 goto ice_aq_get_set_rss_lut_send;
4014 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
4016 goto ice_aq_get_set_rss_lut_send;
4018 goto ice_aq_get_set_rss_lut_send;
4021 /* LUT size is only valid for Global and PF table types */
4023 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
4024 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
4025 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4026 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4028 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
4029 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
4030 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4031 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4033 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
4034 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
4035 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
4036 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
4037 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
4042 status = ICE_ERR_PARAM;
4043 goto ice_aq_get_set_rss_lut_exit;
4046 ice_aq_get_set_rss_lut_send:
4047 cmd_resp->flags = CPU_TO_LE16(flags);
4048 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
4050 ice_aq_get_set_rss_lut_exit:
4055 * ice_aq_get_rss_lut
4056 * @hw: pointer to the hardware structure
4057 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
4059 * get the RSS lookup table, PF or VSI type
4062 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
4064 return __ice_aq_get_set_rss_lut(hw, get_params, false);
4068 * ice_aq_set_rss_lut
4069 * @hw: pointer to the hardware structure
4070 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4072 * set the RSS lookup table, PF or VSI type
4075 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4077 return __ice_aq_get_set_rss_lut(hw, set_params, true);
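/* Usage sketch (editor's illustration, not part of the driver): program a
 * 512-entry PF LUT that spreads flows over four queues. The .lut member
 * name is assumed from ice_type.h; the other fields appear in
 * __ice_aq_get_set_rss_lut() above.
 *
 *	struct ice_aq_get_set_rss_lut_params params = { 0 };
 *	u8 lut[512];
 *	u16 i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % 4;
 *	params.vsi_handle = vsi_handle;
 *	params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
 *	params.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512;
 *	params.lut = lut;
 *	status = ice_aq_set_rss_lut(hw, &params);
 */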
4081 * __ice_aq_get_set_rss_key
4082 * @hw: pointer to the HW struct
4083 * @vsi_id: VSI FW index
4084 * @key: pointer to key info struct
4085 * @set: set true to set the key, false to get the key
4087 * get (0x0B04) or set (0x0B02) the RSS key per VSI
4090 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4091 struct ice_aqc_get_set_rss_keys *key,
4094 struct ice_aqc_get_set_rss_key *cmd_resp;
4095 u16 key_size = sizeof(*key);
4096 struct ice_aq_desc desc;
4098 cmd_resp = &desc.params.get_set_rss_key;
4101 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4102 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4104 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4107 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4108 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
4109 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
4110 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
4112 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4116 * ice_aq_get_rss_key
4117 * @hw: pointer to the HW struct
4118 * @vsi_handle: software VSI handle
4119 * @key: pointer to key info struct
4121 * get the RSS key per VSI
4124 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4125 struct ice_aqc_get_set_rss_keys *key)
4127 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4128 return ICE_ERR_PARAM;
4130 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4131 key);
4135 * ice_aq_set_rss_key
4136 * @hw: pointer to the HW struct
4137 * @vsi_handle: software VSI handle
4138 * @keys: pointer to key info struct
4140 * set the RSS key per VSI
4143 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4144 struct ice_aqc_get_set_rss_keys *keys)
4146 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4147 return ICE_ERR_PARAM;
4149 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4150 keys);
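/* Usage sketch (editor's illustration, not part of the driver): program a
 * caller-supplied hash key for a VSI. The standard_rss_key member name is
 * assumed from ice_adminq_cmd.h.
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *
 *	ice_memcpy(keys.standard_rss_key, my_key,
 *		   sizeof(keys.standard_rss_key), ICE_NONDMA_TO_NONDMA);
 *	if (ice_aq_set_rss_key(hw, vsi_handle, &keys))
 *		ice_debug(hw, ICE_DBG_INIT, "RSS key programming failed\n");
 */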
4154 * ice_aq_add_lan_txq
4155 * @hw: pointer to the hardware structure
4156 * @num_qgrps: Number of added queue groups
4157 * @qg_list: list of queue groups to be added
4158 * @buf_size: size of buffer for indirect command
4159 * @cd: pointer to command details structure or NULL
4161 * Add Tx LAN queue (0x0C30)
4164 * Prior to calling add Tx LAN queue:
4165 * Initialize the following as part of the Tx queue context:
4166 * Completion queue ID if the queue uses Completion queue, Quanta profile,
4167 * Cache profile and Packet shaper profile.
4169 * After add Tx LAN queue AQ command is completed:
4170 * Interrupts should be associated with specific queues,
4171 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
4175 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4176 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
4177 struct ice_sq_cd *cd)
4179 struct ice_aqc_add_tx_qgrp *list;
4180 struct ice_aqc_add_txqs *cmd;
4181 struct ice_aq_desc desc;
4182 u16 i, sum_size = 0;
4184 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4186 cmd = &desc.params.add_txqs;
4188 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
4191 return ICE_ERR_PARAM;
4193 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4194 return ICE_ERR_PARAM;
4196 for (i = 0, list = qg_list; i < num_qgrps; i++) {
4197 sum_size += ice_struct_size(list, txqs, list->num_txqs);
4198 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4202 if (buf_size != sum_size)
4203 return ICE_ERR_PARAM;
4205 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4207 cmd->num_qgrps = num_qgrps;
4209 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4213 * ice_aq_dis_lan_txq
4214 * @hw: pointer to the hardware structure
4215 * @num_qgrps: number of groups in the list
4216 * @qg_list: the list of groups to disable
4217 * @buf_size: the total size of the qg_list buffer in bytes
4218 * @rst_src: if called due to reset, specifies the reset source
4219 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4220 * @cd: pointer to command details structure or NULL
4222 * Disable LAN Tx queue (0x0C31)
4224 static enum ice_status
4225 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4226 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
4227 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4228 struct ice_sq_cd *cd)
4230 struct ice_aqc_dis_txq_item *item;
4231 struct ice_aqc_dis_txqs *cmd;
4232 struct ice_aq_desc desc;
4233 enum ice_status status;
4236 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4237 cmd = &desc.params.dis_txqs;
4238 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4240 /* qg_list can be NULL only in VM/VF reset flow */
4241 if (!qg_list && !rst_src)
4242 return ICE_ERR_PARAM;
4244 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4245 return ICE_ERR_PARAM;
4247 cmd->num_entries = num_qgrps;
4249 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
4250 ICE_AQC_Q_DIS_TIMEOUT_M);
4254 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4255 cmd->vmvf_and_timeout |=
4256 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
4263 /* flush pipe on time out */
4264 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4265 /* If no queue group info, we are in a reset flow. Issue the AQ */
4269 /* set RD bit to indicate that command buffer is provided by the driver
4270 * and it needs to be read by the firmware
4272 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4274 for (i = 0, item = qg_list; i < num_qgrps; i++) {
4275 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
4277 /* If the num of queues is even, add 2 bytes of padding */
4278 if ((item->num_qs % 2) == 0)
4283 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
4287 return ICE_ERR_PARAM;
4290 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4293 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4294 vmvf_num, hw->adminq.sq_last_status);
4296 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4297 LE16_TO_CPU(qg_list[0].q_id[0]),
4298 hw->adminq.sq_last_status);
4304 * ice_aq_move_recfg_lan_txq
4305 * @hw: pointer to the hardware structure
4306 * @num_qs: number of queues to move/reconfigure
4307 * @is_move: true if this operation involves node movement
4308 * @is_tc_change: true if this operation involves a TC change
4309 * @subseq_call: true if this operation is a subsequent call
4310 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
4311 * @timeout: timeout in units of 100 usec (valid values 0-50)
4312 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
4313 * @buf: struct containing src/dest TEID and per-queue info
4314 * @buf_size: size of buffer for indirect command
4315 * @txqs_moved: out param, number of queues successfully moved
4316 * @cd: pointer to command details structure or NULL
4318 * Move / Reconfigure Tx LAN queues (0x0C32)
4321 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
4322 bool is_tc_change, bool subseq_call, bool flush_pipe,
4323 u8 timeout, u32 *blocked_cgds,
4324 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
4325 u8 *txqs_moved, struct ice_sq_cd *cd)
4327 struct ice_aqc_move_txqs *cmd;
4328 struct ice_aq_desc desc;
4329 enum ice_status status;
4331 cmd = &desc.params.move_txqs;
4332 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
4334 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
4335 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
4336 return ICE_ERR_PARAM;
4338 if (is_tc_change && !flush_pipe && !blocked_cgds)
4339 return ICE_ERR_PARAM;
4341 if (!is_move && !is_tc_change)
4342 return ICE_ERR_PARAM;
4344 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4347 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
4350 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
4353 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
4356 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
4358 cmd->num_qs = num_qs;
4359 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
4360 ICE_AQC_Q_CMD_TIMEOUT_M);
4362 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4364 if (!status && txqs_moved)
4365 *txqs_moved = cmd->num_qs;
4367 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
4368 is_tc_change && !flush_pipe)
4369 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
4374 /* End of FW Admin Queue command wrappers */
4377 * ice_write_byte - write a byte to a packed context structure
4378 * @src_ctx: the context structure to read from
4379 * @dest_ctx: the context to be written to
4380 * @ce_info: a description of the struct to be filled
4383 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4385 u8 src_byte, dest_byte, mask;
4389 /* copy from the next struct field */
4390 from = src_ctx + ce_info->offset;
4392 /* prepare the bits and mask */
4393 shift_width = ce_info->lsb % 8;
4394 mask = (u8)(BIT(ce_info->width) - 1);
4399 /* shift to correct alignment */
4400 mask <<= shift_width;
4401 src_byte <<= shift_width;
4403 /* get the current bits from the target bit string */
4404 dest = dest_ctx + (ce_info->lsb / 8);
4406 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4408 dest_byte &= ~mask; /* get the bits not changing */
4409 dest_byte |= src_byte; /* add in the new bits */
4411 /* put it all back */
4412 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4416 * ice_write_word - write a word to a packed context structure
4417 * @src_ctx: the context structure to read from
4418 * @dest_ctx: the context to be written to
4419 * @ce_info: a description of the struct to be filled
4422 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4429 /* copy from the next struct field */
4430 from = src_ctx + ce_info->offset;
4432 /* prepare the bits and mask */
4433 shift_width = ce_info->lsb % 8;
4434 mask = BIT(ce_info->width) - 1;
4436 /* don't swizzle the bits until after the mask because the mask bits
4437 * will be in a different bit position on big endian machines
4439 src_word = *(u16 *)from;
4442 /* shift to correct alignment */
4443 mask <<= shift_width;
4444 src_word <<= shift_width;
4446 /* get the current bits from the target bit string */
4447 dest = dest_ctx + (ce_info->lsb / 8);
4449 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
4451 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
4452 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
4454 /* put it all back */
4455 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4459 * ice_write_dword - write a dword to a packed context structure
4460 * @src_ctx: the context structure to read from
4461 * @dest_ctx: the context to be written to
4462 * @ce_info: a description of the struct to be filled
4465 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4467 u32 src_dword, mask;
4472 /* copy from the next struct field */
4473 from = src_ctx + ce_info->offset;
4475 /* prepare the bits and mask */
4476 shift_width = ce_info->lsb % 8;
4478 /* if the field width is exactly 32 on an x86 machine, then the shift
4479 * operation will not work because the SHL instruction's count is masked
4480 * to 5 bits so the shift will do nothing
4482 if (ce_info->width < 32)
4483 mask = BIT(ce_info->width) - 1;
4487 /* don't swizzle the bits until after the mask because the mask bits
4488 * will be in a different bit position on big endian machines
4490 src_dword = *(u32 *)from;
4493 /* shift to correct alignment */
4494 mask <<= shift_width;
4495 src_dword <<= shift_width;
4497 /* get the current bits from the target bit string */
4498 dest = dest_ctx + (ce_info->lsb / 8);
4500 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
4502 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
4503 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
4505 /* put it all back */
4506 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4510 * ice_write_qword - write a qword to a packed context structure
4511 * @src_ctx: the context structure to read from
4512 * @dest_ctx: the context to be written to
4513 * @ce_info: a description of the struct to be filled
4516 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4518 u64 src_qword, mask;
4523 /* copy from the next struct field */
4524 from = src_ctx + ce_info->offset;
4526 /* prepare the bits and mask */
4527 shift_width = ce_info->lsb % 8;
4529 /* if the field width is exactly 64 on an x86 machine, then the shift
4530 * operation will not work because the SHL instruction's count is masked
4531 * to 6 bits so the shift will do nothing
4533 if (ce_info->width < 64)
4534 mask = BIT_ULL(ce_info->width) - 1;
4538 /* don't swizzle the bits until after the mask because the mask bits
4539 * will be in a different bit position on big endian machines
4541 src_qword = *(u64 *)from;
4544 /* shift to correct alignment */
4545 mask <<= shift_width;
4546 src_qword <<= shift_width;
4548 /* get the current bits from the target bit string */
4549 dest = dest_ctx + (ce_info->lsb / 8);
4551 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4553 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
4554 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
4556 /* put it all back */
4557 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4561 * ice_set_ctx - set context bits in packed structure
4562 * @hw: pointer to the hardware structure
4563 * @src_ctx: pointer to a generic non-packed context structure
4564 * @dest_ctx: pointer to memory for the packed structure
4565 * @ce_info: a description of the structure to be transformed
4568 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4569 const struct ice_ctx_ele *ce_info)
4573 for (f = 0; ce_info[f].width; f++) {
4574 /* We have to deal with each element of the FW response
4575 * using the correct size so that we are correct regardless
4576 * of the endianness of the machine.
4578 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4579 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4580 f, ce_info[f].width, ce_info[f].size_of);
4583 switch (ce_info[f].size_of) {
4585 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4588 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4591 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4594 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4597 return ICE_ERR_INVAL_SIZE;
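/* Worked example (editor's illustration, not part of the driver): packing a
 * hypothetical two-field context with ice_set_ctx(). Each ice_ctx_ele entry
 * describes where the field lives in the unpacked struct (offset/size_of)
 * and where its bits land in the packed buffer (width/lsb).
 *
 *	struct my_ctx {                    // hypothetical unpacked context
 *		u16 base;                  // bits 0..10 of the packed buffer
 *		u8 pf_num;                 // bits 11..13
 *	};
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		{ .offset = offsetof(struct my_ctx, base),
 *		  .size_of = sizeof(u16), .width = 11, .lsb = 0 },
 *		{ .offset = offsetof(struct my_ctx, pf_num),
 *		  .size_of = sizeof(u8), .width = 3, .lsb = 11 },
 *		{ 0 }                      // width of 0 ends the walk
 *	};
 *
 *	struct my_ctx ctx = { .base = 0x123, .pf_num = 2 };
 *	u8 packed[4] = { 0 };
 *
 *	ice_set_ctx(hw, (u8 *)&ctx, packed, my_ctx_info);
 *	// packed[0..1] now hold 0x123 in bits 0..10 and 2 in bits 11..13
 */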
4605 * ice_aq_get_internal_data
4606 * @hw: pointer to the hardware structure
4607 * @cluster_id: specific cluster to dump
4608 * @table_id: table ID within cluster
4609 * @start: index of line in the block to read
4610 * @buf: dump buffer
4611 * @buf_size: dump buffer size
4612 * @ret_buf_size: return buffer size (returned by FW)
4613 * @ret_next_table: next block to read (returned by FW)
4614 * @ret_next_index: next index to read (returned by FW)
4615 * @cd: pointer to command details structure
4617 * Get internal FW/HW data (0xFF08) for debug purposes.
4620 ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
4621 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
4622 u16 *ret_next_table, u32 *ret_next_index,
4623 struct ice_sq_cd *cd)
4625 struct ice_aqc_debug_dump_internals *cmd;
4626 struct ice_aq_desc desc;
4627 enum ice_status status;
4629 cmd = &desc.params.debug_dump;
4631 if (buf_size == 0 || !buf)
4632 return ICE_ERR_PARAM;
4634 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);
4636 cmd->cluster_id = cluster_id;
4637 cmd->table_id = CPU_TO_LE16(table_id);
4638 cmd->idx = CPU_TO_LE32(start);
4640 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4644 *ret_buf_size = LE16_TO_CPU(desc.datalen);
4646 *ret_next_table = LE16_TO_CPU(cmd->table_id);
4648 *ret_next_index = LE32_TO_CPU(cmd->idx);
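/* Usage sketch (editor's illustration, not part of the driver): read the
 * first block of a debug cluster. FW reports how much data it wrote and
 * which table/index to request next, so a caller can iterate until the
 * cluster is exhausted.
 *
 *	u16 ret_size, next_tbl;
 *	u32 next_idx;
 *
 *	status = ice_aq_get_internal_data(hw, cluster_id, 0, 0, buf, buf_len,
 *					  &ret_size, &next_tbl, &next_idx,
 *					  NULL);
 */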
4655 * ice_read_byte - read context byte into struct
4656 * @src_ctx: the context structure to read from
4657 * @dest_ctx: the context to be written to
4658 * @ce_info: a description of the struct to be filled
4661 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4667 /* prepare the bits and mask */
4668 shift_width = ce_info->lsb % 8;
4669 mask = (u8)(BIT(ce_info->width) - 1);
4671 /* shift to correct alignment */
4672 mask <<= shift_width;
4674 /* get the current bits from the src bit string */
4675 src = src_ctx + (ce_info->lsb / 8);
4677 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4679 dest_byte &= ~(mask);
4681 dest_byte >>= shift_width;
4683 /* get the address from the struct field */
4684 target = dest_ctx + ce_info->offset;
4686 /* put it back in the struct */
4687 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4691 * ice_read_word - read context word into struct
4692 * @src_ctx: the context structure to read from
4693 * @dest_ctx: the context to be written to
4694 * @ce_info: a description of the struct to be filled
4697 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4699 u16 dest_word, mask;
4704 /* prepare the bits and mask */
4705 shift_width = ce_info->lsb % 8;
4706 mask = BIT(ce_info->width) - 1;
4708 /* shift to correct alignment */
4709 mask <<= shift_width;
4711 /* get the current bits from the src bit string */
4712 src = src_ctx + (ce_info->lsb / 8);
4714 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4716 /* the data in the memory is stored as little endian so mask it
4719 src_word &= ~(CPU_TO_LE16(mask));
4721 /* get the data back into host order before shifting */
4722 dest_word = LE16_TO_CPU(src_word);
4724 dest_word >>= shift_width;
4726 /* get the address from the struct field */
4727 target = dest_ctx + ce_info->offset;
4729 /* put it back in the struct */
4730 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4734 * ice_read_dword - read context dword into struct
4735 * @src_ctx: the context structure to read from
4736 * @dest_ctx: the context to be written to
4737 * @ce_info: a description of the struct to be filled
4740 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4742 u32 dest_dword, mask;
4747 /* prepare the bits and mask */
4748 shift_width = ce_info->lsb % 8;
4750 /* if the field width is exactly 32 on an x86 machine, then the shift
4751 * operation will not work because the SHL instruction's count is masked
4752 * to 5 bits so the shift will do nothing
4754 if (ce_info->width < 32)
4755 mask = BIT(ce_info->width) - 1;
4759 /* shift to correct alignment */
4760 mask <<= shift_width;
4762 /* get the current bits from the src bit string */
4763 src = src_ctx + (ce_info->lsb / 8);
4765 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4767 /* the data in the memory is stored as little endian so mask it
4770 src_dword &= ~(CPU_TO_LE32(mask));
4772 /* get the data back into host order before shifting */
4773 dest_dword = LE32_TO_CPU(src_dword);
4775 dest_dword >>= shift_width;
4777 /* get the address from the struct field */
4778 target = dest_ctx + ce_info->offset;
4780 /* put it back in the struct */
4781 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4785 * ice_read_qword - read context qword into struct
4786 * @src_ctx: the context structure to read from
4787 * @dest_ctx: the context to be written to
4788 * @ce_info: a description of the struct to be filled
4791 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4793 u64 dest_qword, mask;
4798 /* prepare the bits and mask */
4799 shift_width = ce_info->lsb % 8;
4801 /* if the field width is exactly 64 on an x86 machine, then the shift
4802 * operation will not work because the SHL instruction's count is masked
4803 * to 6 bits so the shift will do nothing
4805 if (ce_info->width < 64)
4806 mask = BIT_ULL(ce_info->width) - 1;
4810 /* shift to correct alignment */
4811 mask <<= shift_width;
4813 /* get the current bits from the src bit string */
4814 src = src_ctx + (ce_info->lsb / 8);
4816 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4818 /* the data in the memory is stored as little endian so mask it
4821 src_qword &= ~(CPU_TO_LE64(mask));
4823 /* get the data back into host order before shifting */
4824 dest_qword = LE64_TO_CPU(src_qword);
4826 dest_qword >>= shift_width;
4828 /* get the address from the struct field */
4829 target = dest_ctx + ce_info->offset;
4831 /* put it back in the struct */
4832 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4836 * ice_get_ctx - extract context bits from a packed structure
4837 * @src_ctx: pointer to a generic packed context structure
4838 * @dest_ctx: pointer to a generic non-packed context structure
4839 * @ce_info: a description of the structure to be read from
4842 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4846 for (f = 0; ce_info[f].width; f++) {
4847 switch (ce_info[f].size_of) {
4849 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
4852 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
4855 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
4858 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
4861 /* nothing to do, just keep going */
4870 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4871 * @hw: pointer to the HW struct
4872 * @vsi_handle: software VSI handle
4873 * @tc: TC number
4874 * @q_handle: software queue handle
4877 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4879 struct ice_vsi_ctx *vsi;
4880 struct ice_q_ctx *q_ctx;
4882 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4885 if (q_handle >= vsi->num_lan_q_entries[tc])
4887 if (!vsi->lan_q_ctx[tc])
4889 q_ctx = vsi->lan_q_ctx[tc];
4890 return &q_ctx[q_handle];
4894 * ice_ena_vsi_txq
4895 * @pi: port information structure
4896 * @vsi_handle: software VSI handle
4897 * @tc: TC number
4898 * @q_handle: software queue handle
4899 * @num_qgrps: Number of added queue groups
4900 * @buf: list of queue groups to be added
4901 * @buf_size: size of buffer for indirect command
4902 * @cd: pointer to command details structure or NULL
4904 * This function adds one LAN queue
4907 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4908 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4909 struct ice_sq_cd *cd)
4911 struct ice_aqc_txsched_elem_data node = { 0 };
4912 struct ice_sched_node *parent;
4913 struct ice_q_ctx *q_ctx;
4914 enum ice_status status;
4917 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4920 if (num_qgrps > 1 || buf->num_txqs > 1)
4921 return ICE_ERR_MAX_LIMIT;
4925 if (!ice_is_vsi_valid(hw, vsi_handle))
4926 return ICE_ERR_PARAM;
4928 ice_acquire_lock(&pi->sched_lock);
4930 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4932 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4934 status = ICE_ERR_PARAM;
4938 /* find a parent node */
4939 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4940 ICE_SCHED_NODE_OWNER_LAN);
4942 status = ICE_ERR_PARAM;
4946 buf->parent_teid = parent->info.node_teid;
4947 node.parent_teid = parent->info.node_teid;
4948 /* Mark the values in the "generic" section as valid. The default
4949 * value in the "generic" section is zero. This means that:
4950 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4951 * - 0 priority among siblings, indicated by Bit 1-3.
4952 * - WFQ, indicated by Bit 4.
4953 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4955 * - Bit 7 is reserved.
4956 * Without setting the generic section as valid in valid_sections, the
4957 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4959 buf->txqs[0].info.valid_sections =
4960 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4961 ICE_AQC_ELEM_VALID_EIR;
4962 buf->txqs[0].info.generic = 0;
4963 buf->txqs[0].info.cir_bw.bw_profile_idx =
4964 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4965 buf->txqs[0].info.cir_bw.bw_alloc =
4966 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4967 buf->txqs[0].info.eir_bw.bw_profile_idx =
4968 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4969 buf->txqs[0].info.eir_bw.bw_alloc =
4970 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4972 /* add the LAN queue */
4973 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4974 if (status != ICE_SUCCESS) {
4975 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4976 LE16_TO_CPU(buf->txqs[0].txq_id),
4977 hw->adminq.sq_last_status);
4981 node.node_teid = buf->txqs[0].q_teid;
4982 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4983 q_ctx->q_handle = q_handle;
4984 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4986 /* add a leaf node into scheduler tree queue layer */
4987 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4989 status = ice_sched_replay_q_bw(pi, q_ctx);
4992 ice_release_lock(&pi->sched_lock);
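/* Usage sketch (editor's illustration, not part of the driver): enable a
 * single Tx queue on TC 0. The caller is assumed to have already written
 * the queue context and chosen txq_id; only one queue group with one queue
 * is allowed, matching the num_qgrps/num_txqs checks above.
 *
 *	struct ice_aqc_add_tx_qgrp *qg;
 *	u16 size = ice_struct_size(qg, txqs, 1);
 *
 *	qg = (struct ice_aqc_add_tx_qgrp *)ice_malloc(hw, size);
 *	if (!qg)
 *		return ICE_ERR_NO_MEMORY;
 *	qg->num_txqs = 1;
 *	qg->txqs[0].txq_id = CPU_TO_LE16(txq_id);
 *	status = ice_ena_vsi_txq(pi, vsi_handle, 0, 0, 1, qg, size, NULL);
 *	ice_free(hw, qg);
 */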
4997 * ice_dis_vsi_txq
4998 * @pi: port information structure
4999 * @vsi_handle: software VSI handle
5000 * @tc: TC number
5001 * @num_queues: number of queues
5002 * @q_handles: pointer to software queue handle array
5003 * @q_ids: pointer to the q_id array
5004 * @q_teids: pointer to queue node teids
5005 * @rst_src: if called due to reset, specifies the reset source
5006 * @vmvf_num: the relative VM or VF number that is undergoing the reset
5007 * @cd: pointer to command details structure or NULL
5009 * This function removes queues and their corresponding nodes in SW DB
5012 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
5013 u16 *q_handles, u16 *q_ids, u32 *q_teids,
5014 enum ice_disq_rst_src rst_src, u16 vmvf_num,
5015 struct ice_sq_cd *cd)
5017 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
5018 struct ice_aqc_dis_txq_item *qg_list;
5019 struct ice_q_ctx *q_ctx;
5023 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5029 /* if the queue is already disabled but the disable queue command
5030 * still has to be sent to complete the VF reset, then call
5031 * ice_aq_dis_lan_txq without any queue information
5034 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
5039 buf_size = ice_struct_size(qg_list, q_id, 1);
5040 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
5042 return ICE_ERR_NO_MEMORY;
5044 ice_acquire_lock(&pi->sched_lock);
5046 for (i = 0; i < num_queues; i++) {
5047 struct ice_sched_node *node;
5049 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
5052 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
5054 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
5058 if (q_ctx->q_handle != q_handles[i]) {
5059 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
5060 q_ctx->q_handle, q_handles[i]);
5063 qg_list->parent_teid = node->info.parent_teid;
5064 qg_list->num_qs = 1;
5065 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
5066 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
5069 if (status != ICE_SUCCESS)
5071 ice_free_sched_node(pi, node);
5072 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
5074 ice_release_lock(&pi->sched_lock);
5075 ice_free(hw, qg_list);
5080 * ice_cfg_vsi_qs - configure the new/existing VSI queues
5081 * @pi: port information structure
5082 * @vsi_handle: software VSI handle
5083 * @tc_bitmap: TC bitmap
5084 * @maxqs: max queues array per TC
5085 * @owner: LAN or RDMA
5087 * This function adds/updates the VSI queues per TC.
5089 static enum ice_status
5090 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5091 u16 *maxqs, u8 owner)
5093 enum ice_status status = ICE_SUCCESS;
5096 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5099 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
5100 return ICE_ERR_PARAM;
5102 ice_acquire_lock(&pi->sched_lock);
5104 ice_for_each_traffic_class(i) {
5105 /* configuration is possible only if TC node is present */
5106 if (!ice_sched_get_tc_node(pi, i))
5109 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
5110 ice_is_tc_ena(tc_bitmap, i));
5115 ice_release_lock(&pi->sched_lock);
5120 * ice_cfg_vsi_lan - configure VSI LAN queues
5121 * @pi: port information structure
5122 * @vsi_handle: software VSI handle
5123 * @tc_bitmap: TC bitmap
5124 * @max_lanqs: max LAN queues array per TC
5126 * This function adds/updates the VSI LAN queues per TC.
5129 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5132 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
5133 ICE_SCHED_NODE_OWNER_LAN);
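/* Usage sketch (editor's illustration, not part of the driver): after a VSI
 * is created, give it the same LAN queue budget on every traffic class.
 * ICE_MAX_TRAFFIC_CLASS is assumed from ice_type.h.
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS];
 *	u8 i;
 *
 *	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++)
 *		max_lanqs[i] = num_queue_pairs;
 *	status = ice_cfg_vsi_lan(pi, vsi_handle, tc_bitmap, max_lanqs);
 */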
5137 * ice_is_main_vsi - checks whether the VSI is main VSI
5138 * @hw: pointer to the HW struct
5139 * @vsi_handle: VSI handle
5141 * Checks whether the VSI is the main VSI (the first PF VSI created on
5144 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
5146 return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
5150 * ice_replay_pre_init - replay pre initialization
5151 * @hw: pointer to the HW struct
5152 * @sw: pointer to switch info struct for which function initializes filters
5154 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5157 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
5159 enum ice_status status;
5162 /* Delete old entries from replay filter list head if there is any */
5163 ice_rm_sw_replay_rule_info(hw, sw);
5164 /* In start of replay, move entries into replay_rules list, it
5165 * will allow adding rules entries back to filt_rules list,
5166 * which is operational list.
5168 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
5169 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
5170 &sw->recp_list[i].filt_replay_rules);
5171 ice_sched_replay_agg_vsi_preinit(hw);
5173 status = ice_sched_replay_root_node_bw(hw->port_info);
5177 return ice_sched_replay_tc_node_bw(hw->port_info);
5181 * ice_replay_vsi - replay VSI configuration
5182 * @hw: pointer to the HW struct
5183 * @vsi_handle: driver VSI handle
5185 * Restore all VSI configuration after reset. It is required to call this
5186 * function with main VSI first.
5188 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
5190 struct ice_switch_info *sw = hw->switch_info;
5191 struct ice_port_info *pi = hw->port_info;
5192 enum ice_status status;
5194 if (!ice_is_vsi_valid(hw, vsi_handle))
5195 return ICE_ERR_PARAM;
5197 /* Replay pre-initialization if there is any */
5198 if (ice_is_main_vsi(hw, vsi_handle)) {
5199 status = ice_replay_pre_init(hw, sw);
5203 /* Replay per VSI all RSS configurations */
5204 status = ice_replay_rss_cfg(hw, vsi_handle);
5207 /* Replay per VSI all filters */
5208 status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
5210 status = ice_replay_vsi_agg(hw, vsi_handle);
5215 * ice_replay_post - post replay configuration cleanup
5216 * @hw: pointer to the HW struct
5218 * Post replay cleanup.
5220 void ice_replay_post(struct ice_hw *hw)
5222 /* Delete old entries from replay filter list head */
5223 ice_rm_all_sw_replay_rule_info(hw);
5224 ice_sched_replay_agg(hw);
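/* Usage sketch (editor's illustration, not part of the driver): a typical
 * post-reset rebuild replays the main VSI first (which runs
 * ice_replay_pre_init() above), then every other active VSI, and finishes
 * with ice_replay_post():
 *
 *	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	// ... replay the remaining active VSI handles the same way ...
 *	ice_replay_post(hw);
 */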
5228 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5229 * @hw: ptr to the hardware info
5230 * @reg: offset of 64 bit HW register to read from
5231 * @prev_stat_loaded: bool to specify if previous stats are loaded
5232 * @prev_stat: ptr to previous loaded stat value
5233 * @cur_stat: ptr to current stat value
5236 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5237 u64 *prev_stat, u64 *cur_stat)
5239 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
5241 /* device stats are not reset at PFR, they likely will not be zeroed
5242 * when the driver starts. Thus, save the value from the first read
5243 * without adding to the statistic value so that we report stats which
5244 * count up from zero.
5246 if (!prev_stat_loaded) {
5247 *prev_stat = new_data;
5251 /* Calculate the difference between the new and old values, and then
5252 * add it to the software stat value.
5254 if (new_data >= *prev_stat)
5255 *cur_stat += new_data - *prev_stat;
5256 else
5257 /* to manage the potential roll-over */
5258 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
5260 /* Update the previously stored value to prepare for next read */
5261 *prev_stat = new_data;
5265 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5266 * @hw: ptr to the hardware info
5267 * @reg: offset of HW register to read from
5268 * @prev_stat_loaded: bool to specify if previous stats are loaded
5269 * @prev_stat: ptr to previous loaded stat value
5270 * @cur_stat: ptr to current stat value
5273 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5274 u64 *prev_stat, u64 *cur_stat)
5278 new_data = rd32(hw, reg);
5280 /* device stats are not reset at PFR, they likely will not be zeroed
5281 * when the driver starts. Thus, save the value from the first read
5282 * without adding to the statistic value so that we report stats which
5283 * count up from zero.
5285 if (!prev_stat_loaded) {
5286 *prev_stat = new_data;
5290 /* Calculate the difference between the new and old values, and then
5291 * add it to the software stat value.
5293 if (new_data >= *prev_stat)
5294 *cur_stat += new_data - *prev_stat;
5295 else
5296 /* to manage the potential roll-over */
5297 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5299 /* Update the previously stored value to prepare for next read */
5300 *prev_stat = new_data;
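/* Worked example of the roll-over handling above (editor's note): with the
 * 32 bit variant, prev_stat = 0xFFFFFFF0 and a new reading of 0x00000010
 * means the hardware counter wrapped. Since new_data < *prev_stat, the
 * driver adds (0x10 + BIT_ULL(32)) - 0xFFFFFFF0 = 0x20 to the software
 * stat, which is exactly the number of events since the previous read.
 */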
5304 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
5305 * @hw: ptr to the hardware info
5306 * @vsi_handle: VSI handle
5307 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
5308 * @cur_stats: ptr to current stats structure
5310 * The GLV_REPC statistic register actually tracks two 16bit statistics, and
5311 * thus cannot be read using the normal ice_stat_update32 function.
5313 * Read the GLV_REPC register associated with the given VSI, and update the
5314 * rx_no_desc and rx_error values in the ice_eth_stats structure.
5316 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
5317 * cleared each time it's read.
5319 * Note that the GLV_RDPC register also counts the causes that would trigger
5320 * GLV_REPC. However, it does not give the finer grained detail about why the
5321 * packets are being dropped. The GLV_REPC values can be used to distinguish
5322 * whether Rx packets are dropped due to errors or due to no available
5323 * descriptors.
5326 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
5327 struct ice_eth_stats *cur_stats)
5329 u16 vsi_num, no_desc, error_cnt;
5332 if (!ice_is_vsi_valid(hw, vsi_handle))
5335 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
5337 /* If we haven't loaded stats yet, just clear the current value */
5338 if (!prev_stat_loaded) {
5339 wr32(hw, GLV_REPC(vsi_num), 0);
5343 repc = rd32(hw, GLV_REPC(vsi_num));
5344 no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
5345 error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
5347 /* Clear the count by writing to the stats register */
5348 wr32(hw, GLV_REPC(vsi_num), 0);
5350 cur_stats->rx_no_desc += no_desc;
5351 cur_stats->rx_errors += error_cnt;
5355 * ice_sched_query_elem - query element information from HW
5356 * @hw: pointer to the HW struct
5357 * @node_teid: node TEID to be queried
5358 * @buf: buffer to element information
5360 * This function queries HW element information
5363 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
5364 struct ice_aqc_txsched_elem_data *buf)
5366 u16 buf_size, num_elem_ret = 0;
5367 enum ice_status status;
5369 buf_size = sizeof(*buf);
5370 ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
5371 buf->node_teid = CPU_TO_LE32(node_teid);
5372 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
5374 if (status != ICE_SUCCESS || num_elem_ret != 1)
5375 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
5380 * ice_get_fw_mode - returns FW mode
5381 * @hw: pointer to the HW struct
5383 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
5385 #define ICE_FW_MODE_DBG_M BIT(0)
5386 #define ICE_FW_MODE_REC_M BIT(1)
5387 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
5390 /* check the current FW mode */
5391 fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
5393 if (fw_mode & ICE_FW_MODE_DBG_M)
5394 return ICE_FW_MODE_DBG;
5395 else if (fw_mode & ICE_FW_MODE_REC_M)
5396 return ICE_FW_MODE_REC;
5397 else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
5398 return ICE_FW_MODE_ROLLBACK;
5400 return ICE_FW_MODE_NORMAL;
5404 * ice_aq_read_i2c
5405 * @hw: pointer to the hw struct
5406 * @topo_addr: topology address for a device to communicate with
5407 * @bus_addr: 7-bit I2C bus address
5408 * @addr: I2C memory address (I2C offset) with up to 16 bits
5409 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
5410 * bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
5411 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
5412 * @cd: pointer to command details structure or NULL
5414 * Read I2C (0x06E2)
5417 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5418 u16 bus_addr, __le16 addr, u8 params, u8 *data,
5419 struct ice_sq_cd *cd)
5421 struct ice_aq_desc desc = { 0 };
5422 struct ice_aqc_i2c *cmd;
5423 enum ice_status status;
5426 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
5427 cmd = &desc.params.read_write_i2c;
5430 return ICE_ERR_PARAM;
5432 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
5434 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
5435 cmd->topo_addr = topo_addr;
5436 cmd->i2c_params = params;
5437 cmd->i2c_addr = addr;
5439 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5441 struct ice_aqc_read_i2c_resp *resp;
5444 resp = &desc.params.read_i2c_resp;
5445 for (i = 0; i < data_size; i++) {
5446 *data = resp->i2c_data[i];
5455 * ice_aq_write_i2c
5456 * @hw: pointer to the hw struct
5457 * @topo_addr: topology address for a device to communicate with
5458 * @bus_addr: 7-bit I2C bus address
5459 * @addr: I2C memory address (I2C offset) with up to 16 bits
5460 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
5461 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
5462 * @cd: pointer to command details structure or NULL
5464 * Write I2C (0x06E3)
5467 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
5468 u16 bus_addr, __le16 addr, u8 params, u8 *data,
5469 struct ice_sq_cd *cd)
5471 struct ice_aq_desc desc = { 0 };
5472 struct ice_aqc_i2c *cmd;
5475 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
5476 cmd = &desc.params.read_write_i2c;
5478 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
5480 /* data_size limited to 4 */
5482 return ICE_ERR_PARAM;
5484 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
5485 cmd->topo_addr = topo_addr;
5486 cmd->i2c_params = params;
5487 cmd->i2c_addr = addr;
5489 for (i = 0; i < data_size; i++) {
5490 cmd->i2c_data[i] = *data;
5494 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5498 * ice_aq_set_driver_param - Set driver parameter to share via firmware
5499 * @hw: pointer to the HW struct
5500 * @idx: parameter index to set
5501 * @value: the value to set the parameter to
5502 * @cd: pointer to command details structure or NULL
5504 * Set the value of one of the software defined parameters. All PFs connected
5505 * to this device can read the value using ice_aq_get_driver_param.
5507 * Note that firmware provides no synchronization or locking, and will not
5508 * save the parameter value during a device reset. It is expected that
5509 * a single PF will write the parameter value, while all other PFs will only
5513 ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
5514 u32 value, struct ice_sq_cd *cd)
5516 struct ice_aqc_driver_shared_params *cmd;
5517 struct ice_aq_desc desc;
5519 if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
5520 return ICE_ERR_OUT_OF_RANGE;
5522 cmd = &desc.params.drv_shared_params;
5524 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
5526 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
5527 cmd->param_indx = idx;
5528 cmd->param_val = CPU_TO_LE32(value);
5530 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5534 * ice_aq_get_driver_param - Get driver parameter shared via firmware
5535 * @hw: pointer to the HW struct
5536 * @idx: parameter index to set
5537 * @value: storage to return the shared parameter
5538 * @cd: pointer to command details structure or NULL
5540 * Get the value of one of the software defined parameters.
5542 * Note that firmware provides no synchronization or locking. It is expected
5543 * that only a single PF will write a given parameter.
5546 ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
5547 u32 *value, struct ice_sq_cd *cd)
5549 struct ice_aqc_driver_shared_params *cmd;
5550 struct ice_aq_desc desc;
5551 enum ice_status status;
5553 if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
5554 return ICE_ERR_OUT_OF_RANGE;
5556 cmd = &desc.params.drv_shared_params;
5558 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
5560 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
5561 cmd->param_indx = idx;
5563 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5567 *value = LE32_TO_CPU(cmd->param_val);
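/* Usage sketch (editor's illustration, not part of the driver): one PF
 * publishes a value that other PFs on the same device later read back. The
 * parameter index name below is hypothetical.
 *
 *	// writer PF:
 *	ice_aq_set_driver_param(hw, ICE_AQC_DRIVER_PARAM_EXAMPLE, 1, NULL);
 *
 *	// reader PFs:
 *	u32 val;
 *	if (!ice_aq_get_driver_param(hw, ICE_AQC_DRIVER_PARAM_EXAMPLE,
 *				     &val, NULL))
 *		ice_debug(hw, ICE_DBG_INIT, "shared param = %u\n", val);
 */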
5573 * ice_aq_set_gpio
5574 * @hw: pointer to the hw struct
5575 * @gpio_ctrl_handle: GPIO controller node handle
5576 * @pin_idx: IO Number of the GPIO that needs to be set
5577 * @value: SW-provided IO value to set in the LSB
5578 * @cd: pointer to command details structure or NULL
5580 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
5583 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
5584 struct ice_sq_cd *cd)
5586 struct ice_aqc_gpio *cmd;
5587 struct ice_aq_desc desc;
5589 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
5590 cmd = &desc.params.read_write_gpio;
5591 cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
5592 cmd->gpio_num = pin_idx;
5593 cmd->gpio_val = value ? 1 : 0;
5595 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5599 * ice_aq_get_gpio
5600 * @hw: pointer to the hw struct
5601 * @gpio_ctrl_handle: GPIO controller node handle
5602 * @pin_idx: IO Number of the GPIO that needs to be set
5603 * @value: IO value read
5604 * @cd: pointer to command details structure or NULL
5606 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
5610 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
5611 bool *value, struct ice_sq_cd *cd)
5613 struct ice_aqc_gpio *cmd;
5614 struct ice_aq_desc desc;
5615 enum ice_status status;
5617 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
5618 cmd = &desc.params.read_write_gpio;
5619 cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
5620 cmd->gpio_num = pin_idx;
5622 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5626 *value = !!cmd->gpio_val;
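/* Usage sketch (editor's illustration, not part of the driver): drive a
 * GPIO pin high and read the level back. The controller handle and pin
 * index would normally come from the netlist topology; the values here are
 * placeholders.
 *
 *	bool level;
 *
 *	if (!ice_aq_set_gpio(hw, gpio_ctrl_handle, 2, true, NULL) &&
 *	    !ice_aq_get_gpio(hw, gpio_ctrl_handle, 2, &level, NULL))
 *		ice_debug(hw, ICE_DBG_PHY, "GPIO pin 2 reads %d\n", level);
 */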
5631 * ice_fw_supports_link_override
5632 * @hw: pointer to the hardware structure
5634 * Checks if the firmware supports link override
5636 bool ice_fw_supports_link_override(struct ice_hw *hw)
5638 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
5639 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
5641 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
5642 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
5644 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
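/* Usage sketch (editor's illustration, not part of the driver): capability
 * checks like this one gate optional flows at init time, e.g. with the
 * ice_get_link_default_override() helper defined below:
 *
 *	struct ice_link_default_override_tlv ldo;
 *
 *	if (ice_fw_supports_link_override(hw))
 *		status = ice_get_link_default_override(&ldo, pi);
 */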
5652 * ice_get_link_default_override
5653 * @ldo: pointer to the link default override struct
5654 * @pi: pointer to the port info struct
5656 * Gets the link default override for a port
5659 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
5660 struct ice_port_info *pi)
5662 u16 i, tlv, tlv_len, tlv_start, buf, offset;
5663 struct ice_hw *hw = pi->hw;
5664 enum ice_status status;
5666 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
5667 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
5669 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
5673 /* Each port has its own config; calculate for our port */
5674 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
5675 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
5677 /* link options first */
5678 status = ice_read_sr_word(hw, tlv_start, &buf);
5680 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5683 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
5684 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
5685 ICE_LINK_OVERRIDE_PHY_CFG_S;
5687 /* link PHY config */
5688 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
5689 status = ice_read_sr_word(hw, offset, &buf);
5691 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
5694 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
5697 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
5698 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5699 status = ice_read_sr_word(hw, (offset + i), &buf);
5701 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5704 /* shift 16 bits at a time to fill 64 bits */
5705 ldo->phy_type_low |= ((u64)buf << (i * 16));
5708 /* PHY types high */
5709 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
5710 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
5711 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5712 status = ice_read_sr_word(hw, (offset + i), &buf);
5714 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5717 /* shift 16 bits at a time to fill 64 bits */
5718 ldo->phy_type_high |= ((u64)buf << (i * 16));
5725 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
5726 * @caps: get PHY capability data
5728 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
5730 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
5731 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
5732 ICE_AQC_PHY_AN_EN_CLAUSE73 |
5733 ICE_AQC_PHY_AN_EN_CLAUSE37))
5740 * ice_aq_set_lldp_mib - Set the LLDP MIB
5741 * @hw: pointer to the HW struct
5742 * @mib_type: Local, Remote or both Local and Remote MIBs
5743 * @buf: pointer to the caller-supplied buffer to store the MIB block
5744 * @buf_size: size of the buffer (in bytes)
5745 * @cd: pointer to command details structure or NULL
5747 * Set the LLDP MIB. (0x0A08)
5750 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
5751 struct ice_sq_cd *cd)
5753 struct ice_aqc_lldp_set_local_mib *cmd;
5754 struct ice_aq_desc desc;
5756 cmd = &desc.params.lldp_set_mib;
5758 if (buf_size == 0 || !buf)
5759 return ICE_ERR_PARAM;
5761 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
5763 desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
5764 desc.datalen = CPU_TO_LE16(buf_size);
5766 cmd->type = mib_type;
5767 cmd->length = CPU_TO_LE16(buf_size);
5769 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5773 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
5774 * @hw: pointer to HW struct
5776 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
5778 if (hw->mac_type != ICE_MAC_E810)
5781 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
5782 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
5784 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
5785 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
5787 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
5794 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
5795 * @hw: pointer to HW struct
5796 * @vsi_num: absolute HW index for VSI
5797 * @add: boolean for if adding or removing a filter
5800 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
5802 struct ice_aqc_lldp_filter_ctrl *cmd;
5803 struct ice_aq_desc desc;
5805 cmd = &desc.params.lldp_filter_ctrl;
5807 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
5810 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
5812 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
5814 cmd->vsi_num = CPU_TO_LE16(vsi_num);
5816 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5820 * ice_fw_supports_report_dflt_cfg
5821 * @hw: pointer to the hardware structure
5823 * Checks if the firmware supports report default configuration
5825 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
5827 if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
5828 if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
5830 if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
5831 hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
5833 } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {