1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_common.h"
7 #include "ice_adminq_cmd.h"
10 #include "ice_switch.h"
12 #define ICE_PF_RESET_WAIT_COUNT 300
15 * ice_set_mac_type - Sets MAC type
16 * @hw: pointer to the HW structure
18 * This function sets the MAC type of the adapter based on the
19 * vendor ID and device ID stored in the HW structure.
21 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
23 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
25 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
26 return ICE_ERR_DEVICE_NOT_SUPPORTED;
28 switch (hw->device_id) {
29 case ICE_DEV_ID_E810C_BACKPLANE:
30 case ICE_DEV_ID_E810C_QSFP:
31 case ICE_DEV_ID_E810C_SFP:
32 case ICE_DEV_ID_E810_XXV_BACKPLANE:
33 case ICE_DEV_ID_E810_XXV_QSFP:
34 case ICE_DEV_ID_E810_XXV_SFP:
35 hw->mac_type = ICE_MAC_E810;
37 case ICE_DEV_ID_E822C_10G_BASE_T:
38 case ICE_DEV_ID_E822C_BACKPLANE:
39 case ICE_DEV_ID_E822C_QSFP:
40 case ICE_DEV_ID_E822C_SFP:
41 case ICE_DEV_ID_E822C_SGMII:
42 case ICE_DEV_ID_E822L_10G_BASE_T:
43 case ICE_DEV_ID_E822L_BACKPLANE:
44 case ICE_DEV_ID_E822L_SFP:
45 case ICE_DEV_ID_E822L_SGMII:
46 case ICE_DEV_ID_E823L_10G_BASE_T:
47 case ICE_DEV_ID_E823L_1GBE:
48 case ICE_DEV_ID_E823L_BACKPLANE:
49 case ICE_DEV_ID_E823L_QSFP:
50 case ICE_DEV_ID_E823L_SFP:
51 case ICE_DEV_ID_E823C_10G_BASE_T:
52 case ICE_DEV_ID_E823C_BACKPLANE:
53 case ICE_DEV_ID_E823C_QSFP:
54 case ICE_DEV_ID_E823C_SFP:
55 case ICE_DEV_ID_E823C_SGMII:
56 hw->mac_type = ICE_MAC_GENERIC;
59 hw->mac_type = ICE_MAC_UNKNOWN;
63 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
68 * ice_clear_pf_cfg - Clear PF configuration
69 * @hw: pointer to the hardware structure
71 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
72 * configuration, flow director filters, etc.).
74 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
76 struct ice_aq_desc desc;
78 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
80 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
84 * ice_aq_manage_mac_read - manage MAC address read command
85 * @hw: pointer to the HW struct
86 * @buf: a virtual buffer to hold the manage MAC read response
87 * @buf_size: Size of the virtual buffer
88 * @cd: pointer to command details structure or NULL
90 * This function is used to return per PF station MAC address (0x0107).
91 * NOTE: Upon successful completion of this command, MAC address information
92 * is returned in the user-specified buffer. Interpret the user-specified
93 * buffer as a "manage_mac_read" response.
94 * Responses such as the various MAC addresses are stored in the HW struct (port.mac).
95 * ice_discover_dev_caps is expected to be called before this function is called.
98 static enum ice_status
99 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
100 struct ice_sq_cd *cd)
102 struct ice_aqc_manage_mac_read_resp *resp;
103 struct ice_aqc_manage_mac_read *cmd;
104 struct ice_aq_desc desc;
105 enum ice_status status;
109 cmd = &desc.params.mac_read;
111 if (buf_size < sizeof(*resp))
112 return ICE_ERR_BUF_TOO_SHORT;
114 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
116 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
120 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
121 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
123 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
124 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
128 /* A single port can report up to two (LAN and WoL) addresses */
129 for (i = 0; i < cmd->num_addr; i++)
130 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
131 ice_memcpy(hw->port_info->mac.lan_addr,
132 resp[i].mac_addr, ETH_ALEN,
134 ice_memcpy(hw->port_info->mac.perm_addr,
136 ETH_ALEN, ICE_DMA_TO_NONDMA);
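/* A minimal usage sketch (an illustration, not part of the flow above): the
 * caller sizes the response buffer for the two addresses a port may report:
 *
 *	struct ice_aqc_manage_mac_read_resp *mac_buf;
 *	enum ice_status status;
 *
 *	mac_buf = ice_calloc(hw, 2, sizeof(*mac_buf));
 *	if (!mac_buf)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_manage_mac_read(hw, mac_buf, 2 * sizeof(*mac_buf), NULL);
 *	ice_free(hw, mac_buf);
 *
 * ice_init_hw() below follows this same pattern.
 */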
143 * ice_aq_get_phy_caps - returns PHY capabilities
144 * @pi: port information structure
145 * @qual_mods: report qualified modules
146 * @report_mode: report mode capabilities
147 * @pcaps: structure for PHY capabilities to be filled
148 * @cd: pointer to command details structure or NULL
150 * Returns the various PHY capabilities supported on the Port (0x0600)
153 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
154 struct ice_aqc_get_phy_caps_data *pcaps,
155 struct ice_sq_cd *cd)
157 struct ice_aqc_get_phy_caps *cmd;
158 u16 pcaps_size = sizeof(*pcaps);
159 struct ice_aq_desc desc;
160 enum ice_status status;
163 cmd = &desc.params.get_phy;
165 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
166 return ICE_ERR_PARAM;
169 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
170 !ice_fw_supports_report_dflt_cfg(hw))
171 return ICE_ERR_PARAM;
173 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
176 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
178 cmd->param0 |= CPU_TO_LE16(report_mode);
179 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
181 ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
183 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
184 (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
185 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
186 (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
187 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
188 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
189 pcaps->low_power_ctrl_an);
190 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
191 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
193 ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
194 pcaps->link_fec_options);
195 ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
196 pcaps->module_compliance_enforcement);
197 ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
198 pcaps->extended_compliance_code);
199 ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
200 pcaps->module_type[0]);
201 ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
202 pcaps->module_type[1]);
203 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
204 pcaps->module_type[2]);
206 if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
207 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
208 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
209 ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
210 sizeof(pi->phy.link_info.module_type),
211 ICE_NONDMA_TO_NONDMA);
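/* A minimal usage sketch, assuming the caller wants the media-based topology
 * capabilities (the same report mode ice_init_hw() uses below):
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	enum ice_status status;
 *
 *	pcaps = (struct ice_aqc_get_phy_caps_data *)
 *		ice_malloc(hw, sizeof(*pcaps));
 *	if (!pcaps)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *				     pcaps, NULL);
 *	ice_free(hw, pcaps);
 */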
218 * ice_aq_get_link_topo_handle - get link topology node return status
219 * @pi: port information structure
220 * @node_type: requested node type
221 * @cd: pointer to command details structure or NULL
223 * Get link topology node return status for specified node type (0x06E0)
225 * The cage node type can be used to determine whether a cage is present. If
226 * the AQC returns an error (ENOENT), no cage is present; in that case the
227 * connection type is backplane or BASE-T.
229 static enum ice_status
230 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
231 struct ice_sq_cd *cd)
233 struct ice_aqc_get_link_topo *cmd;
234 struct ice_aq_desc desc;
236 cmd = &desc.params.get_link_topo;
238 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
240 cmd->addr.topo_params.node_type_ctx =
241 (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
242 ICE_AQC_LINK_TOPO_NODE_CTX_S);
245 cmd->addr.topo_params.node_type_ctx |=
246 (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
248 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
252 * ice_is_media_cage_present
253 * @pi: port information structure
255 * Returns true if a media cage is present, else false. If no cage is
256 * present, the media type is backplane or BASE-T.
258 static bool ice_is_media_cage_present(struct ice_port_info *pi)
260 /* The cage node type can be used to determine whether a cage is present.
261 * If the AQC returns an error (ENOENT), no cage is present; in that case
262 * the connection type is backplane or BASE-T.
264 return !ice_aq_get_link_topo_handle(pi,
265 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
270 * ice_get_media_type - Gets media type
271 * @pi: port information structure
273 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
275 struct ice_link_status *hw_link_info;
278 return ICE_MEDIA_UNKNOWN;
280 hw_link_info = &pi->phy.link_info;
281 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
282 /* If more than one media type is selected, report unknown */
283 return ICE_MEDIA_UNKNOWN;
285 if (hw_link_info->phy_type_low) {
286 /* 1G SGMII is a special case where some DA cable PHYs
287 * may show this as an option when it really shouldn't
288 * be since SGMII is meant to be between a MAC and a PHY
289 * in a backplane. Try to detect this case and handle it
291 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
292 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
293 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
294 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
295 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
298 switch (hw_link_info->phy_type_low) {
299 case ICE_PHY_TYPE_LOW_1000BASE_SX:
300 case ICE_PHY_TYPE_LOW_1000BASE_LX:
301 case ICE_PHY_TYPE_LOW_10GBASE_SR:
302 case ICE_PHY_TYPE_LOW_10GBASE_LR:
303 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
304 case ICE_PHY_TYPE_LOW_25GBASE_SR:
305 case ICE_PHY_TYPE_LOW_25GBASE_LR:
306 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
307 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
308 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
309 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
310 case ICE_PHY_TYPE_LOW_50GBASE_SR:
311 case ICE_PHY_TYPE_LOW_50GBASE_FR:
312 case ICE_PHY_TYPE_LOW_50GBASE_LR:
313 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
314 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
315 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
316 case ICE_PHY_TYPE_LOW_100GBASE_DR:
317 return ICE_MEDIA_FIBER;
318 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
319 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
320 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
321 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
322 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
323 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
324 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
325 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
326 return ICE_MEDIA_FIBER;
327 case ICE_PHY_TYPE_LOW_100BASE_TX:
328 case ICE_PHY_TYPE_LOW_1000BASE_T:
329 case ICE_PHY_TYPE_LOW_2500BASE_T:
330 case ICE_PHY_TYPE_LOW_5GBASE_T:
331 case ICE_PHY_TYPE_LOW_10GBASE_T:
332 case ICE_PHY_TYPE_LOW_25GBASE_T:
333 return ICE_MEDIA_BASET;
334 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
335 case ICE_PHY_TYPE_LOW_25GBASE_CR:
336 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
337 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
338 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
339 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
340 case ICE_PHY_TYPE_LOW_50GBASE_CP:
341 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
342 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
343 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
345 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
346 case ICE_PHY_TYPE_LOW_40G_XLAUI:
347 case ICE_PHY_TYPE_LOW_50G_LAUI2:
348 case ICE_PHY_TYPE_LOW_50G_AUI2:
349 case ICE_PHY_TYPE_LOW_50G_AUI1:
350 case ICE_PHY_TYPE_LOW_100G_AUI4:
351 case ICE_PHY_TYPE_LOW_100G_CAUI4:
352 if (ice_is_media_cage_present(pi))
353 return ICE_MEDIA_AUI;
355 case ICE_PHY_TYPE_LOW_1000BASE_KX:
356 case ICE_PHY_TYPE_LOW_2500BASE_KX:
357 case ICE_PHY_TYPE_LOW_2500BASE_X:
358 case ICE_PHY_TYPE_LOW_5GBASE_KR:
359 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
360 case ICE_PHY_TYPE_LOW_25GBASE_KR:
361 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
362 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
363 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
364 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
365 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
366 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
367 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
368 return ICE_MEDIA_BACKPLANE;
371 switch (hw_link_info->phy_type_high) {
372 case ICE_PHY_TYPE_HIGH_100G_AUI2:
373 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
374 if (ice_is_media_cage_present(pi))
375 return ICE_MEDIA_AUI;
377 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
378 return ICE_MEDIA_BACKPLANE;
379 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
380 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
381 return ICE_MEDIA_FIBER;
384 return ICE_MEDIA_UNKNOWN;
388 * ice_aq_get_link_info
389 * @pi: port information structure
390 * @ena_lse: enable/disable LinkStatusEvent reporting
391 * @link: pointer to link status structure - optional
392 * @cd: pointer to command details structure or NULL
394 * Get Link Status (0x607). Returns the link status of the adapter.
397 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
398 struct ice_link_status *link, struct ice_sq_cd *cd)
400 struct ice_aqc_get_link_status_data link_data = { 0 };
401 struct ice_aqc_get_link_status *resp;
402 struct ice_link_status *li_old, *li;
403 enum ice_media_type *hw_media_type;
404 struct ice_fc_info *hw_fc_info;
405 bool tx_pause, rx_pause;
406 struct ice_aq_desc desc;
407 enum ice_status status;
412 return ICE_ERR_PARAM;
414 li_old = &pi->phy.link_info_old;
415 hw_media_type = &pi->phy.media_type;
416 li = &pi->phy.link_info;
417 hw_fc_info = &pi->fc;
419 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
420 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
421 resp = &desc.params.get_link_status;
422 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
423 resp->lport_num = pi->lport;
425 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
427 if (status != ICE_SUCCESS)
430 /* save off old link status information */
433 /* update current link status information */
434 li->link_speed = LE16_TO_CPU(link_data.link_speed);
435 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
436 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
437 *hw_media_type = ice_get_media_type(pi);
438 li->link_info = link_data.link_info;
439 li->link_cfg_err = link_data.link_cfg_err;
440 li->an_info = link_data.an_info;
441 li->ext_info = link_data.ext_info;
442 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
443 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
444 li->topo_media_conflict = link_data.topo_media_conflict;
445 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
446 ICE_AQ_CFG_PACING_TYPE_M);
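/* Derive the current flow control mode from the negotiated PAUSE bits:
 * both Tx and Rx pause -> ICE_FC_FULL, Tx only -> ICE_FC_TX_PAUSE,
 * Rx only -> ICE_FC_RX_PAUSE, neither -> ICE_FC_NONE.
 */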
449 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
450 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
451 if (tx_pause && rx_pause)
452 hw_fc_info->current_mode = ICE_FC_FULL;
454 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
456 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
458 hw_fc_info->current_mode = ICE_FC_NONE;
460 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
462 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
463 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
464 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
465 (unsigned long long)li->phy_type_low);
466 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
467 (unsigned long long)li->phy_type_high);
468 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
469 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
470 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
471 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
472 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
473 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
474 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
475 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
477 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
479 /* save link status information */
483 /* flag cleared so calling functions don't call AQ again */
484 pi->phy.get_link_info = false;
490 * ice_fill_tx_timer_and_fc_thresh
491 * @hw: pointer to the HW struct
492 * @cmd: pointer to MAC cfg structure
494 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
498 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
499 struct ice_aqc_set_mac_cfg *cmd)
501 u16 fc_thres_val, tx_timer_val;
504 /* We read back the transmit timer and fc threshold value of
505 * LFC. Thus, we will use index =
506 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
508 * Also, because we are operating on transmit timer and fc
509 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
511 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
513 /* Retrieve the transmit timer */
514 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
516 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
517 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
519 /* Retrieve the fc threshold */
520 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
521 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
523 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
528 * @hw: pointer to the HW struct
529 * @max_frame_size: Maximum Frame Size to be supported
530 * @cd: pointer to command details structure or NULL
532 * Set MAC configuration (0x0603)
535 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
537 struct ice_aqc_set_mac_cfg *cmd;
538 struct ice_aq_desc desc;
540 cmd = &desc.params.set_mac_cfg;
542 if (max_frame_size == 0)
543 return ICE_ERR_PARAM;
545 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
547 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
549 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
551 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
555 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
556 * @hw: pointer to the HW struct
558 enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
560 struct ice_switch_info *sw;
561 enum ice_status status;
563 hw->switch_info = (struct ice_switch_info *)
564 ice_malloc(hw, sizeof(*hw->switch_info));
566 sw = hw->switch_info;
569 return ICE_ERR_NO_MEMORY;
571 INIT_LIST_HEAD(&sw->vsi_list_map_head);
572 sw->prof_res_bm_init = 0;
574 status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
576 ice_free(hw, hw->switch_info);
583 * ice_cleanup_fltr_mgmt_single - clears single filter mngt struct
584 * @hw: pointer to the HW struct
585 * @sw: pointer to switch info struct for which function clears filters
588 ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
590 struct ice_vsi_list_map_info *v_pos_map;
591 struct ice_vsi_list_map_info *v_tmp_map;
592 struct ice_sw_recipe *recps;
598 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
599 ice_vsi_list_map_info, list_entry) {
600 LIST_DEL(&v_pos_map->list_entry);
601 ice_free(hw, v_pos_map);
603 recps = sw->recp_list;
604 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
605 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
607 recps[i].root_rid = i;
608 LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
609 &recps[i].rg_list, ice_recp_grp_entry,
611 LIST_DEL(&rg_entry->l_entry);
612 ice_free(hw, rg_entry);
615 if (recps[i].adv_rule) {
616 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
617 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
619 ice_destroy_lock(&recps[i].filt_rule_lock);
620 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
621 &recps[i].filt_rules,
622 ice_adv_fltr_mgmt_list_entry,
624 LIST_DEL(&lst_itr->list_entry);
625 ice_free(hw, lst_itr->lkups);
626 ice_free(hw, lst_itr);
629 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
631 ice_destroy_lock(&recps[i].filt_rule_lock);
632 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
633 &recps[i].filt_rules,
634 ice_fltr_mgmt_list_entry,
636 LIST_DEL(&lst_itr->list_entry);
637 ice_free(hw, lst_itr);
640 if (recps[i].root_buf)
641 ice_free(hw, recps[i].root_buf);
643 ice_rm_sw_replay_rule_info(hw, sw);
644 ice_free(hw, sw->recp_list);
649 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
650 * @hw: pointer to the HW struct
652 void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
654 ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
658 * ice_get_itr_intrl_gran
659 * @hw: pointer to the HW struct
661 * Determines the ITR/INTRL granularities based on the maximum aggregate
662 * bandwidth according to the device's configuration during power-on.
664 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
666 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
667 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
668 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
670 switch (max_agg_bw) {
671 case ICE_MAX_AGG_BW_200G:
672 case ICE_MAX_AGG_BW_100G:
673 case ICE_MAX_AGG_BW_50G:
674 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
675 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
677 case ICE_MAX_AGG_BW_25G:
678 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
679 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
685 * ice_print_rollback_msg - print FW rollback message
686 * @hw: pointer to the hardware structure
688 void ice_print_rollback_msg(struct ice_hw *hw)
690 char nvm_str[ICE_NVM_VER_LEN] = { 0 };
691 struct ice_orom_info *orom;
692 struct ice_nvm_info *nvm;
694 orom = &hw->flash.orom;
695 nvm = &hw->flash.nvm;
697 SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
698 nvm->major, nvm->minor, nvm->eetrack, orom->major,
699 orom->build, orom->patch);
701 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
702 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
706 * ice_init_hw - main hardware initialization routine
707 * @hw: pointer to the hardware structure
709 enum ice_status ice_init_hw(struct ice_hw *hw)
711 struct ice_aqc_get_phy_caps_data *pcaps;
712 enum ice_status status;
716 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
718 /* Set MAC type based on DeviceID */
719 status = ice_set_mac_type(hw);
723 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
724 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
725 PF_FUNC_RID_FUNCTION_NUMBER_S;
727 status = ice_reset(hw, ICE_RESET_PFR);
731 ice_get_itr_intrl_gran(hw);
733 status = ice_create_all_ctrlq(hw);
735 goto err_unroll_cqinit;
737 status = ice_init_nvm(hw);
739 goto err_unroll_cqinit;
741 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
742 ice_print_rollback_msg(hw);
744 status = ice_clear_pf_cfg(hw);
746 goto err_unroll_cqinit;
748 /* Set bit to enable Flow Director filters */
749 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
750 INIT_LIST_HEAD(&hw->fdir_list_head);
752 ice_clear_pxe_mode(hw);
754 status = ice_get_caps(hw);
756 goto err_unroll_cqinit;
758 hw->port_info = (struct ice_port_info *)
759 ice_malloc(hw, sizeof(*hw->port_info));
760 if (!hw->port_info) {
761 status = ICE_ERR_NO_MEMORY;
762 goto err_unroll_cqinit;
765 /* set the back pointer to HW */
766 hw->port_info->hw = hw;
768 /* Initialize port_info struct with switch configuration data */
769 status = ice_get_initial_sw_cfg(hw);
771 goto err_unroll_alloc;
774 /* Query the allocated resources for Tx scheduler */
775 status = ice_sched_query_res_alloc(hw);
777 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
778 goto err_unroll_alloc;
780 ice_sched_get_psm_clk_freq(hw);
782 /* Initialize port_info struct with scheduler data */
783 status = ice_sched_init_port(hw->port_info);
785 goto err_unroll_sched;
786 pcaps = (struct ice_aqc_get_phy_caps_data *)
787 ice_malloc(hw, sizeof(*pcaps));
789 status = ICE_ERR_NO_MEMORY;
790 goto err_unroll_sched;
793 /* Initialize port_info struct with PHY capabilities */
794 status = ice_aq_get_phy_caps(hw->port_info, false,
795 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
798 ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
801 /* Initialize port_info struct with link information */
802 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
804 goto err_unroll_sched;
805 /* need a valid SW entry point to build a Tx tree */
806 if (!hw->sw_entry_point_layer) {
807 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
808 status = ICE_ERR_CFG;
809 goto err_unroll_sched;
811 INIT_LIST_HEAD(&hw->agg_list);
812 /* Initialize max burst size */
813 if (!hw->max_burst_size)
814 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
815 status = ice_init_fltr_mgmt_struct(hw);
817 goto err_unroll_sched;
819 /* Get MAC information */
820 /* A single port can report up to two (LAN and WoL) addresses */
821 mac_buf = ice_calloc(hw, 2,
822 sizeof(struct ice_aqc_manage_mac_read_resp));
823 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
826 status = ICE_ERR_NO_MEMORY;
827 goto err_unroll_fltr_mgmt_struct;
830 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
831 ice_free(hw, mac_buf);
834 goto err_unroll_fltr_mgmt_struct;
835 /* enable jumbo frame support at MAC level */
836 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
838 goto err_unroll_fltr_mgmt_struct;
839 /* Obtain counter base index which would be used by flow director */
840 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
842 goto err_unroll_fltr_mgmt_struct;
843 status = ice_init_hw_tbls(hw);
845 goto err_unroll_fltr_mgmt_struct;
846 ice_init_lock(&hw->tnl_lock);
850 err_unroll_fltr_mgmt_struct:
851 ice_cleanup_fltr_mgmt_struct(hw);
853 ice_sched_cleanup_all(hw);
855 ice_free(hw, hw->port_info);
856 hw->port_info = NULL;
858 ice_destroy_all_ctrlq(hw);
863 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
864 * @hw: pointer to the hardware structure
866 * This should be called only during nominal operation, not as a result of
867 * ice_init_hw() failing, since ice_init_hw() will take care of unrolling any
868 * applicable initializations if it fails for any reason.
870 void ice_deinit_hw(struct ice_hw *hw)
872 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
873 ice_cleanup_fltr_mgmt_struct(hw);
875 ice_sched_cleanup_all(hw);
876 ice_sched_clear_agg(hw);
878 ice_free_hw_tbls(hw);
879 ice_destroy_lock(&hw->tnl_lock);
882 ice_free(hw, hw->port_info);
883 hw->port_info = NULL;
886 ice_destroy_all_ctrlq(hw);
888 /* Clear VSI contexts if not already cleared */
889 ice_clear_all_vsi_ctx(hw);
893 * ice_check_reset - Check to see if a global reset is complete
894 * @hw: pointer to the hardware structure
896 enum ice_status ice_check_reset(struct ice_hw *hw)
898 u32 cnt, reg = 0, grst_timeout, uld_mask;
900 /* Poll for Device Active state in case a recent CORER, GLOBR,
901 * or EMPR has occurred. The grst delay value is in 100ms units.
902 * Add 1sec for outstanding AQ commands that can take a long time.
904 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
905 GLGEN_RSTCTL_GRSTDEL_S) + 10;
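/* Each iteration of the loop below sleeps 100 ms, so grst_timeout iterations
 * cover the HW-reported delay plus the extra second added above.
 */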
907 for (cnt = 0; cnt < grst_timeout; cnt++) {
908 ice_msec_delay(100, true);
909 reg = rd32(hw, GLGEN_RSTAT);
910 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
914 if (cnt == grst_timeout) {
915 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
916 return ICE_ERR_RESET_FAILED;
919 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
920 GLNVM_ULD_PCIER_DONE_1_M |\
921 GLNVM_ULD_CORER_DONE_M |\
922 GLNVM_ULD_GLOBR_DONE_M |\
923 GLNVM_ULD_POR_DONE_M |\
924 GLNVM_ULD_POR_DONE_1_M |\
925 GLNVM_ULD_PCIER_DONE_2_M)
927 uld_mask = ICE_RESET_DONE_MASK;
929 /* Device is Active; check Global Reset processes are done */
930 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
931 reg = rd32(hw, GLNVM_ULD) & uld_mask;
932 if (reg == uld_mask) {
933 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
936 ice_msec_delay(10, true);
939 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
940 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
942 return ICE_ERR_RESET_FAILED;
949 * ice_pf_reset - Reset the PF
950 * @hw: pointer to the hardware structure
952 * If a global reset has been triggered, this function checks
953 * for its completion and then issues the PF reset
955 static enum ice_status ice_pf_reset(struct ice_hw *hw)
959 /* If at function entry a global reset was already in progress, i.e.
960 * state is not 'device active' or any of the reset done bits are not
961 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
962 * global reset is done.
964 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
965 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
966 /* poll on global reset currently in progress until done */
967 if (ice_check_reset(hw))
968 return ICE_ERR_RESET_FAILED;
974 reg = rd32(hw, PFGEN_CTRL);
976 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
978 /* Wait for the PFR to complete. The wait time is the global config lock
979 * timeout plus the PFR timeout which will account for a possible reset
980 * that is occurring during a download package operation.
982 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
983 ICE_PF_RESET_WAIT_COUNT; cnt++) {
984 reg = rd32(hw, PFGEN_CTRL);
985 if (!(reg & PFGEN_CTRL_PFSWR_M))
988 ice_msec_delay(1, true);
991 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
992 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
993 return ICE_ERR_RESET_FAILED;
1000 * ice_reset - Perform different types of reset
1001 * @hw: pointer to the hardware structure
1002 * @req: reset request
1004 * This function triggers a reset as specified by the req parameter.
1007 * If anything other than a PF reset is triggered, PXE mode is restored.
1008 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1009 * interface has been restored in the rebuild flow.
1011 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1017 return ice_pf_reset(hw);
1018 case ICE_RESET_CORER:
1019 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1020 val = GLGEN_RTRIG_CORER_M;
1022 case ICE_RESET_GLOBR:
1023 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1024 val = GLGEN_RTRIG_GLOBR_M;
1027 return ICE_ERR_PARAM;
1030 val |= rd32(hw, GLGEN_RTRIG);
1031 wr32(hw, GLGEN_RTRIG, val);
1034 /* wait for the FW to be ready */
1035 return ice_check_reset(hw);
1039 * ice_copy_rxq_ctx_to_hw
1040 * @hw: pointer to the hardware structure
1041 * @ice_rxq_ctx: pointer to the rxq context
1042 * @rxq_index: the index of the Rx queue
1044 * Copies rxq context from dense structure to HW register space
1046 static enum ice_status
1047 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1052 return ICE_ERR_BAD_PTR;
1054 if (rxq_index > QRX_CTRL_MAX_INDEX)
1055 return ICE_ERR_PARAM;
1057 /* Copy each dword separately to HW */
1058 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1059 wr32(hw, QRX_CONTEXT(i, rxq_index),
1060 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1062 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1063 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1069 /* LAN Rx Queue Context */
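/* Each ICE_CTX_STORE entry below gives a field of the software context
 * structure, its width in bits, and the bit offset (LSB) at which
 * ice_set_ctx() packs it into the dense context image written to hardware.
 */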
1070 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1071 /* Field Width LSB */
1072 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1073 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1074 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1075 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1076 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1077 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1078 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1079 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1080 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1081 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1082 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1083 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1084 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1085 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1086 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1087 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1088 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1089 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1090 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1091 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1097 * @hw: pointer to the hardware structure
1098 * @rlan_ctx: pointer to the rxq context
1099 * @rxq_index: the index of the Rx queue
1101 * Converts rxq context from sparse to dense structure, writes it to HW
1102 * register space, and enables the hardware to prefetch descriptors
1103 * instead of fetching them only on demand.
1106 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1109 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1112 return ICE_ERR_BAD_PTR;
1114 rlan_ctx->prefena = 1;
1116 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1117 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
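/* A minimal usage sketch, assuming hypothetical caller-side values
 * (ring_base, ring_len, rxq_index) already encoded as the hardware expects:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *	enum ice_status status;
 *
 *	rlan_ctx.base = ring_base;
 *	rlan_ctx.qlen = ring_len;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */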
1122 * @hw: pointer to the hardware structure
1123 * @rxq_index: the index of the Rx queue to clear
1125 * Clears rxq context in HW register space
1127 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1131 if (rxq_index > QRX_CTRL_MAX_INDEX)
1132 return ICE_ERR_PARAM;
1134 /* Clear each dword register separately */
1135 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1136 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1141 /* LAN Tx Queue Context */
1142 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1143 /* Field Width LSB */
1144 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1145 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1146 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1147 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1148 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1149 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1150 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1151 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1152 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1153 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1154 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1155 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1156 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1157 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1158 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1159 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1160 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1161 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1162 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1163 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1164 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1165 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1166 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1167 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1168 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1169 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1170 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1171 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1176 * ice_copy_tx_cmpltnq_ctx_to_hw
1177 * @hw: pointer to the hardware structure
1178 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1179 * @tx_cmpltnq_index: the index of the completion queue
1181 * Copies Tx completion queue context from dense structure to HW register space
1183 static enum ice_status
1184 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1185 u32 tx_cmpltnq_index)
1189 if (!ice_tx_cmpltnq_ctx)
1190 return ICE_ERR_BAD_PTR;
1192 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1193 return ICE_ERR_PARAM;
1195 /* Copy each dword separately to HW */
1196 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1197 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1198 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1200 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1201 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1207 /* LAN Tx Completion Queue Context */
1208 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1209 /* Field Width LSB */
1210 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1211 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1212 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1213 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1214 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1215 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1216 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1217 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1218 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1219 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1224 * ice_write_tx_cmpltnq_ctx
1225 * @hw: pointer to the hardware structure
1226 * @tx_cmpltnq_ctx: pointer to the completion queue context
1227 * @tx_cmpltnq_index: the index of the completion queue
1229 * Converts completion queue context from sparse to dense structure and then
1230 * writes it to HW register space
1233 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1234 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1235 u32 tx_cmpltnq_index)
1237 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1239 ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1240 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1244 * ice_clear_tx_cmpltnq_ctx
1245 * @hw: pointer to the hardware structure
1246 * @tx_cmpltnq_index: the index of the completion queue to clear
1248 * Clears Tx completion queue context in HW register space
1251 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1255 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1256 return ICE_ERR_PARAM;
1258 /* Clear each dword register separately */
1259 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1260 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1266 * ice_copy_tx_drbell_q_ctx_to_hw
1267 * @hw: pointer to the hardware structure
1268 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1269 * @tx_drbell_q_index: the index of the doorbell queue
1271 * Copies doorbell queue context from dense structure to HW register space
1273 static enum ice_status
1274 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1275 u32 tx_drbell_q_index)
1279 if (!ice_tx_drbell_q_ctx)
1280 return ICE_ERR_BAD_PTR;
1282 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1283 return ICE_ERR_PARAM;
1285 /* Copy each dword separately to HW */
1286 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1287 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1288 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1290 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1291 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1297 /* LAN Tx Doorbell Queue Context info */
1298 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1299 /* Field Width LSB */
1300 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1301 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1302 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1303 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1304 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1305 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1306 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1307 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1308 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1309 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1310 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1315 * ice_write_tx_drbell_q_ctx
1316 * @hw: pointer to the hardware structure
1317 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1318 * @tx_drbell_q_index: the index of the doorbell queue
1320 * Converts doorbell queue context from sparse to dense structure and then
1321 * writes it to HW register space
1324 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1325 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1326 u32 tx_drbell_q_index)
1328 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1330 ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1331 ice_tx_drbell_q_ctx_info);
1332 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1336 * ice_clear_tx_drbell_q_ctx
1337 * @hw: pointer to the hardware structure
1338 * @tx_drbell_q_index: the index of the doorbell queue to clear
1340 * Clears doorbell queue context in HW register space
1343 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1347 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1348 return ICE_ERR_PARAM;
1350 /* Clear each dword register separately */
1351 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1352 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1357 /* FW Admin Queue command wrappers */
1360 * ice_should_retry_sq_send_cmd
1361 * @opcode: AQ opcode
1363 * Decide if we should retry the send command routine for the ATQ, depending on the opcode.
1366 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1369 case ice_aqc_opc_get_link_topo:
1370 case ice_aqc_opc_lldp_stop:
1371 case ice_aqc_opc_lldp_start:
1372 case ice_aqc_opc_lldp_filter_ctrl:
1380 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1381 * @hw: pointer to the HW struct
1382 * @cq: pointer to the specific Control queue
1383 * @desc: prefilled descriptor describing the command
1384 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1385 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1386 * @cd: pointer to command details structure
1388 * Retry sending the FW Admin Queue command, multiple times, if the FW Admin
1389 * Queue returns the EBUSY AQ error.
1391 static enum ice_status
1392 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1393 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1394 struct ice_sq_cd *cd)
1396 struct ice_aq_desc desc_cpy;
1397 enum ice_status status;
1398 bool is_cmd_for_retry;
1403 opcode = LE16_TO_CPU(desc->opcode);
1404 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1405 ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
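/* Keep copies of the descriptor and, for indirect commands, of the caller's
 * buffer so an identical request can be resent if the FW answers with EBUSY.
 */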
1407 if (is_cmd_for_retry) {
1409 buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1411 return ICE_ERR_NO_MEMORY;
1414 ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1415 ICE_NONDMA_TO_NONDMA);
1419 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1421 if (!is_cmd_for_retry || status == ICE_SUCCESS ||
1422 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1426 ice_memcpy(buf, buf_cpy, buf_size,
1427 ICE_NONDMA_TO_NONDMA);
1429 ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1430 ICE_NONDMA_TO_NONDMA);
1432 ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1434 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1437 ice_free(hw, buf_cpy);
1443 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1444 * @hw: pointer to the HW struct
1445 * @desc: descriptor describing the command
1446 * @buf: buffer to use for indirect commands (NULL for direct commands)
1447 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1448 * @cd: pointer to command details structure
1450 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1453 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1454 u16 buf_size, struct ice_sq_cd *cd)
1456 if (hw->aq_send_cmd_fn) {
1457 enum ice_status status = ICE_ERR_NOT_READY;
1458 u16 retval = ICE_AQ_RC_OK;
1460 ice_acquire_lock(&hw->adminq.sq_lock);
1461 if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc,
1463 retval = LE16_TO_CPU(desc->retval);
1464 /* strip off FW internal code */
1467 if (retval == ICE_AQ_RC_OK)
1468 status = ICE_SUCCESS;
1470 status = ICE_ERR_AQ_ERROR;
1473 hw->adminq.sq_last_status = (enum ice_aq_err)retval;
1474 ice_release_lock(&hw->adminq.sq_lock);
1478 return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1483 * @hw: pointer to the HW struct
1484 * @cd: pointer to command details structure or NULL
1486 * Get the firmware version (0x0001) from the admin queue commands
1488 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1490 struct ice_aqc_get_ver *resp;
1491 struct ice_aq_desc desc;
1492 enum ice_status status;
1494 resp = &desc.params.get_ver;
1496 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1498 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1501 hw->fw_branch = resp->fw_branch;
1502 hw->fw_maj_ver = resp->fw_major;
1503 hw->fw_min_ver = resp->fw_minor;
1504 hw->fw_patch = resp->fw_patch;
1505 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1506 hw->api_branch = resp->api_branch;
1507 hw->api_maj_ver = resp->api_major;
1508 hw->api_min_ver = resp->api_minor;
1509 hw->api_patch = resp->api_patch;
1516 * ice_aq_send_driver_ver
1517 * @hw: pointer to the HW struct
1518 * @dv: driver's major, minor version
1519 * @cd: pointer to command details structure or NULL
1521 * Send the driver version (0x0002) to the firmware
1524 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1525 struct ice_sq_cd *cd)
1527 struct ice_aqc_driver_ver *cmd;
1528 struct ice_aq_desc desc;
1531 cmd = &desc.params.driver_ver;
1534 return ICE_ERR_PARAM;
1536 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1538 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1539 cmd->major_ver = dv->major_ver;
1540 cmd->minor_ver = dv->minor_ver;
1541 cmd->build_ver = dv->build_ver;
1542 cmd->subbuild_ver = dv->subbuild_ver;
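/* Send only the leading run of non-NUL ASCII characters of the driver string
 * as the indirect buffer.
 */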
1545 while (len < sizeof(dv->driver_string) &&
1546 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1549 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1554 * @hw: pointer to the HW struct
1555 * @unloading: is the driver unloading itself
1557 * Tell the Firmware that we're shutting down the AdminQ and whether
1558 * or not the driver is unloading as well (0x0003).
1560 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1562 struct ice_aqc_q_shutdown *cmd;
1563 struct ice_aq_desc desc;
1565 cmd = &desc.params.q_shutdown;
1567 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1570 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1572 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1577 * @hw: pointer to the HW struct
1579 * @access: access type
1580 * @sdp_number: resource number
1581 * @timeout: the maximum time in ms that the driver may hold the resource
1582 * @cd: pointer to command details structure or NULL
1584 * Requests common resource using the admin queue commands (0x0008).
1585 * When attempting to acquire the Global Config Lock, the driver can
1586 * learn of three states:
1587 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1588 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1589 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1590 * successfully downloaded the package; the driver does
1591 * not have to download the package and can continue
1594 * Note that if the caller is in an acquire lock, perform action, release lock
1595 * phase of operation, it is possible that the FW may detect a timeout and issue
1596 * a CORER. In this case, the driver will receive a CORER interrupt and will
1597 * have to determine its cause. The calling thread that is handling this flow
1598 * will likely get an error propagated back to it indicating the Download
1599 * Package, Update Package or the Release Resource AQ commands timed out.
1601 static enum ice_status
1602 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1603 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1604 struct ice_sq_cd *cd)
1606 struct ice_aqc_req_res *cmd_resp;
1607 struct ice_aq_desc desc;
1608 enum ice_status status;
1610 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1612 cmd_resp = &desc.params.res_owner;
1614 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1616 cmd_resp->res_id = CPU_TO_LE16(res);
1617 cmd_resp->access_type = CPU_TO_LE16(access);
1618 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1619 cmd_resp->timeout = CPU_TO_LE32(*timeout);
1622 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1624 /* The completion specifies the maximum time in ms that the driver
1625 * may hold the resource in the Timeout field.
1628 /* Global config lock response utilizes an additional status field.
1630 * If the Global config lock resource is held by some other driver, the
1631 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1632 * and the timeout field indicates the maximum time the current owner
1633 * of the resource has to free it.
1635 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1636 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1637 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1639 } else if (LE16_TO_CPU(cmd_resp->status) ==
1640 ICE_AQ_RES_GLBL_IN_PROG) {
1641 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1642 return ICE_ERR_AQ_ERROR;
1643 } else if (LE16_TO_CPU(cmd_resp->status) ==
1644 ICE_AQ_RES_GLBL_DONE) {
1645 return ICE_ERR_AQ_NO_WORK;
1648 /* invalid FW response, force a timeout immediately */
1650 return ICE_ERR_AQ_ERROR;
1653 /* If the resource is held by some other driver, the command completes
1654 * with a busy return value and the timeout field indicates the maximum
1655 * time the current owner of the resource has to free it.
1657 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1658 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1664 * ice_aq_release_res
1665 * @hw: pointer to the HW struct
1667 * @sdp_number: resource number
1668 * @cd: pointer to command details structure or NULL
1670 * release common resource using the admin queue commands (0x0009)
1672 static enum ice_status
1673 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1674 struct ice_sq_cd *cd)
1676 struct ice_aqc_req_res *cmd;
1677 struct ice_aq_desc desc;
1679 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1681 cmd = &desc.params.res_owner;
1683 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1685 cmd->res_id = CPU_TO_LE16(res);
1686 cmd->res_number = CPU_TO_LE32(sdp_number);
1688 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1693 * @hw: pointer to the HW structure
1695 * @access: access type (read or write)
1696 * @timeout: timeout in milliseconds
1698 * This function will attempt to acquire the ownership of a resource.
1701 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1702 enum ice_aq_res_access_type access, u32 timeout)
1704 #define ICE_RES_POLLING_DELAY_MS 10
1705 u32 delay = ICE_RES_POLLING_DELAY_MS;
1706 u32 time_left = timeout;
1707 enum ice_status status;
1709 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1711 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1713 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1714 * previously acquired the resource and performed any necessary updates;
1715 * in this case the caller does not obtain the resource and has no
1716 * further work to do.
1718 if (status == ICE_ERR_AQ_NO_WORK)
1719 goto ice_acquire_res_exit;
1722 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1724 /* If necessary, poll until the current lock owner times out */
1725 timeout = time_left;
1726 while (status && timeout && time_left) {
1727 ice_msec_delay(delay, true);
1728 timeout = (timeout > delay) ? timeout - delay : 0;
1729 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1731 if (status == ICE_ERR_AQ_NO_WORK)
1732 /* lock free, but no work to do */
1739 if (status && status != ICE_ERR_AQ_NO_WORK)
1740 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1742 ice_acquire_res_exit:
1743 if (status == ICE_ERR_AQ_NO_WORK) {
1744 if (access == ICE_RES_WRITE)
1745 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1747 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
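/* A minimal usage sketch of the acquire/release pairing, assuming a timeout
 * in milliseconds appropriate for the resource being requested:
 *
 *	enum ice_status status;
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
 *				 timeout);
 *	if (!status) {
 *		// ... perform the protected operation ...
 *		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 *	} else if (status == ICE_ERR_AQ_NO_WORK) {
 *		// another driver already did the work; nothing to acquire
 *	}
 */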
1754 * @hw: pointer to the HW structure
1757 * This function will release a resource using the proper Admin Command.
1759 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1761 enum ice_status status;
1762 u32 total_delay = 0;
1764 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1766 status = ice_aq_release_res(hw, res, 0, NULL);
1768 /* there are some rare cases when trying to release the resource
1769 * results in an admin queue timeout, so handle them correctly
1771 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1772 (total_delay < hw->adminq.sq_cmd_timeout)) {
1773 ice_msec_delay(1, true);
1774 status = ice_aq_release_res(hw, res, 0, NULL);
1780 * ice_aq_alloc_free_res - command to allocate/free resources
1781 * @hw: pointer to the HW struct
1782 * @num_entries: number of resource entries in buffer
1783 * @buf: Indirect buffer to hold data parameters and response
1784 * @buf_size: size of buffer for indirect commands
1785 * @opc: pass in the command opcode
1786 * @cd: pointer to command details structure or NULL
1788 * Helper function to allocate/free resources using the admin queue commands
1791 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1792 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1793 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1795 struct ice_aqc_alloc_free_res_cmd *cmd;
1796 struct ice_aq_desc desc;
1798 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1800 cmd = &desc.params.sw_res_ctrl;
1803 return ICE_ERR_PARAM;
1805 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
1806 return ICE_ERR_PARAM;
1808 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1810 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1812 cmd->num_entries = CPU_TO_LE16(num_entries);
1814 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1818 * ice_alloc_hw_res - allocate resource
1819 * @hw: pointer to the HW struct
1820 * @type: type of resource
1821 * @num: number of resources to allocate
1822 * @btm: allocate from bottom
1823 * @res: pointer to array that will receive the resources
1826 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1828 struct ice_aqc_alloc_free_res_elem *buf;
1829 enum ice_status status;
1832 buf_len = ice_struct_size(buf, elem, num);
1833 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1835 return ICE_ERR_NO_MEMORY;
1837 /* Prepare buffer to allocate resource. */
1838 buf->num_elems = CPU_TO_LE16(num);
1839 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1840 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1842 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1844 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1845 ice_aqc_opc_alloc_res, NULL);
1847 goto ice_alloc_res_exit;
1849 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
1850 ICE_NONDMA_TO_NONDMA);
1858 * ice_free_hw_res - free allocated HW resource
1859 * @hw: pointer to the HW struct
1860 * @type: type of resource to free
1861 * @num: number of resources
1862 * @res: pointer to array that contains the resources to free
1864 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1866 struct ice_aqc_alloc_free_res_elem *buf;
1867 enum ice_status status;
1870 buf_len = ice_struct_size(buf, elem, num);
1871 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1873 return ICE_ERR_NO_MEMORY;
1875 /* Prepare buffer to free resource. */
1876 buf->num_elems = CPU_TO_LE16(num);
1877 buf->res_type = CPU_TO_LE16(type);
1878 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
1879 ICE_NONDMA_TO_NONDMA);
1881 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1882 ice_aqc_opc_free_res, NULL);
1884 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
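/* A minimal usage sketch of the allocate/free pairing, assuming "res_type" is
 * a valid ICE_AQC_RES_TYPE_* value and "num" (at most eight here) entries are
 * requested:
 *
 *	u16 res_ids[8];
 *	enum ice_status status;
 *
 *	status = ice_alloc_hw_res(hw, res_type, num, false, res_ids);
 *	if (!status)
 *		status = ice_free_hw_res(hw, res_type, num, res_ids);
 */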
1891 * ice_get_num_per_func - determine number of resources per PF
1892 * @hw: pointer to the HW structure
1893 * @max: value to be evenly split between each PF
1895 * Determine the number of valid functions by going through the bitmap returned
1896 * from parsing capabilities and use this to calculate the number of resources
1897 * per PF based on the max value passed in.
1899 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1903 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1904 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
1905 ICE_CAPS_VALID_FUNCS_M);
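/* Example: with four PFs enabled in the valid_functions bitmap, a max of
 * 2048 is split evenly into 512 resources per PF.
 */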
1914 * ice_parse_common_caps - parse common device/function capabilities
1915 * @hw: pointer to the HW struct
1916 * @caps: pointer to common capabilities structure
1917 * @elem: the capability element to parse
1918 * @prefix: message prefix for tracing capabilities
1920 * Given a capability element, extract relevant details into the common
1921 * capability structure.
1923 * Returns: true if the capability matches one of the common capability ids, false otherwise.
1927 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1928 struct ice_aqc_list_caps_elem *elem, const char *prefix)
1930 u32 logical_id = LE32_TO_CPU(elem->logical_id);
1931 u32 phys_id = LE32_TO_CPU(elem->phys_id);
1932 u32 number = LE32_TO_CPU(elem->number);
1933 u16 cap = LE16_TO_CPU(elem->cap);
1937 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1938 caps->valid_functions = number;
1939 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
1940 caps->valid_functions);
1942 case ICE_AQC_CAPS_DCB:
1943 caps->dcb = (number == 1);
1944 caps->active_tc_bitmap = logical_id;
1945 caps->maxtc = phys_id;
1946 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
1947 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
1948 caps->active_tc_bitmap);
1949 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
1951 case ICE_AQC_CAPS_RSS:
1952 caps->rss_table_size = number;
1953 caps->rss_table_entry_width = logical_id;
1954 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
1955 caps->rss_table_size);
1956 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
1957 caps->rss_table_entry_width);
1959 case ICE_AQC_CAPS_RXQS:
1960 caps->num_rxq = number;
1961 caps->rxq_first_id = phys_id;
1962 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
1964 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
1965 caps->rxq_first_id);
1967 case ICE_AQC_CAPS_TXQS:
1968 caps->num_txq = number;
1969 caps->txq_first_id = phys_id;
1970 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
1972 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
1973 caps->txq_first_id);
1975 case ICE_AQC_CAPS_MSIX:
1976 caps->num_msix_vectors = number;
1977 caps->msix_vector_first_id = phys_id;
1978 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
1979 caps->num_msix_vectors);
1980 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
1981 caps->msix_vector_first_id);
1983 case ICE_AQC_CAPS_NVM_MGMT:
1984 caps->sec_rev_disabled =
1985 (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
1987 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
1988 caps->sec_rev_disabled);
1989 caps->update_disabled =
1990 (number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
1992 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
1993 caps->update_disabled);
1994 caps->nvm_unified_update =
1995 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
1997 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
1998 caps->nvm_unified_update);
2000 case ICE_AQC_CAPS_MAX_MTU:
2001 caps->max_mtu = number;
2002 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2003 prefix, caps->max_mtu);
2005 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
2006 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
2007 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
2008 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
2010 u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
2012 if (index >= ICE_EXT_TOPO_DEV_IMG_COUNT)
2015 caps->ext_topo_dev_img_ver_high[index] = number;
2016 caps->ext_topo_dev_img_ver_low[index] = logical_id;
2017 caps->ext_topo_dev_img_part_num[index] =
2018 (phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
2019 ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
2020 caps->ext_topo_dev_img_load_en[index] =
2021 (phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
2022 caps->ext_topo_dev_img_prog_en[index] =
2023 (phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
2024 ice_debug(hw, ICE_DBG_INIT,
2025 "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
2027 caps->ext_topo_dev_img_ver_high[index]);
2028 ice_debug(hw, ICE_DBG_INIT,
2029 "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
2031 caps->ext_topo_dev_img_ver_low[index]);
2032 ice_debug(hw, ICE_DBG_INIT,
2033 "%s: ext_topo_dev_img_part_num[%d] = %d\n",
2035 caps->ext_topo_dev_img_part_num[index]);
2036 ice_debug(hw, ICE_DBG_INIT,
2037 "%s: ext_topo_dev_img_load_en[%d] = %d\n",
2039 caps->ext_topo_dev_img_load_en[index]);
2040 ice_debug(hw, ICE_DBG_INIT,
2041 "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
2043 caps->ext_topo_dev_img_prog_en[index]);
2047 /* Not one of the recognized common capabilities */
2055 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2056 * @hw: pointer to the HW structure
2057 * @caps: pointer to capabilities structure to fix
2059 * Re-calculate the capabilities that are dependent on the number of physical
2060 * ports; i.e. some features are not supported or function differently on
2061 * devices with more than 4 ports.
2064 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2066 /* This assumes device capabilities are always scanned before function
2067 * capabilities during the initialization flow.
2069 if (hw->dev_caps.num_funcs > 4) {
2070 /* Max 4 TCs per port */
2072 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2078 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2079 * @hw: pointer to the HW struct
2080 * @func_p: pointer to function capabilities structure
2081 * @cap: pointer to the capability element to parse
2083 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2086 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2087 struct ice_aqc_list_caps_elem *cap)
2089 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2090 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2091 LE32_TO_CPU(cap->number));
2092 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2093 func_p->guar_num_vsi);
2097 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2098 * @hw: pointer to the HW struct
2099 * @func_p: pointer to function capabilities structure
2101 * Extract function capabilities for ICE_AQC_CAPS_FD.
2104 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2108 if (hw->dcf_enabled)
2110 reg_val = rd32(hw, GLQF_FD_SIZE);
2111 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2112 GLQF_FD_SIZE_FD_GSIZE_S;
2113 func_p->fd_fltr_guar =
2114 ice_get_num_per_func(hw, val);
2115 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2116 GLQF_FD_SIZE_FD_BSIZE_S;
2117 func_p->fd_fltr_best_effort = val;
2119 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2120 func_p->fd_fltr_guar);
2121 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2122 func_p->fd_fltr_best_effort);
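/* Note (illustrative only): GLQF_FD_SIZE describes a device-wide guaranteed
 * filter pool (GSIZE), which is split evenly across PFs above, and a
 * best-effort pool (BSIZE), which is reported as-is.
 */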
2126 * ice_parse_func_caps - Parse function capabilities
2127 * @hw: pointer to the HW struct
2128 * @func_p: pointer to function capabilities structure
2129 * @buf: buffer containing the function capability records
2130 * @cap_count: the number of capabilities
2132 * Helper function to parse function (0x000A) capabilities list. For
2133 * capabilities shared between device and function, this relies on
2134 * ice_parse_common_caps.
2136 * Loop through the list of provided capabilities and extract the relevant
2137 * data into the function capabilities structure.
2140 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2141 void *buf, u32 cap_count)
2143 struct ice_aqc_list_caps_elem *cap_resp;
2146 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2148 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2150 for (i = 0; i < cap_count; i++) {
2151 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2154 found = ice_parse_common_caps(hw, &func_p->common_cap,
2155 &cap_resp[i], "func caps");
2158 case ICE_AQC_CAPS_VSI:
2159 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2161 case ICE_AQC_CAPS_FD:
2162 ice_parse_fdir_func_caps(hw, func_p);
2165 /* Don't list common capabilities as unknown */
2167 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2173 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2177 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2178 * @hw: pointer to the HW struct
2179 * @dev_p: pointer to device capabilities structure
2180 * @cap: capability element to parse
2182 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2185 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2186 struct ice_aqc_list_caps_elem *cap)
2188 u32 number = LE32_TO_CPU(cap->number);
2190 dev_p->num_funcs = ice_hweight32(number);
2191 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2196 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2197 * @hw: pointer to the HW struct
2198 * @dev_p: pointer to device capabilities structure
2199 * @cap: capability element to parse
2201 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2204 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2205 struct ice_aqc_list_caps_elem *cap)
2207 u32 number = LE32_TO_CPU(cap->number);
2209 dev_p->num_vsi_allocd_to_host = number;
2210 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2211 dev_p->num_vsi_allocd_to_host);
2215 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2216 * @hw: pointer to the HW struct
2217 * @dev_p: pointer to device capabilities structure
2218 * @cap: capability element to parse
2220 * Parse ICE_AQC_CAPS_FD for device capabilities.
2223 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2224 struct ice_aqc_list_caps_elem *cap)
2226 u32 number = LE32_TO_CPU(cap->number);
2228 dev_p->num_flow_director_fltr = number;
2229 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2230 dev_p->num_flow_director_fltr);
2234 * ice_parse_dev_caps - Parse device capabilities
2235 * @hw: pointer to the HW struct
2236 * @dev_p: pointer to device capabilities structure
2237 * @buf: buffer containing the device capability records
2238 * @cap_count: the number of capabilities
2240 * Helper function to parse device (0x000B) capabilities list. For
2241 * capabilities shared between device and function, this relies on
2242 * ice_parse_common_caps.
2244 * Loop through the list of provided capabilities and extract the relevant
2245 * data into the device capabilities structure.
2248 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2249 void *buf, u32 cap_count)
2251 struct ice_aqc_list_caps_elem *cap_resp;
2254 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2256 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2258 for (i = 0; i < cap_count; i++) {
2259 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2262 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2263 &cap_resp[i], "dev caps");
2266 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2267 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2269 case ICE_AQC_CAPS_VSI:
2270 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2272 case ICE_AQC_CAPS_FD:
2273 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2276 /* Don't list common capabilities as unknown */
2278 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2284 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2288 * ice_aq_list_caps - query function/device capabilities
2289 * @hw: pointer to the HW struct
2290 * @buf: a buffer to hold the capabilities
2291 * @buf_size: size of the buffer
2292 * @cap_count: if not NULL, set to the number of capabilities reported
2293 * @opc: capabilities type to discover, device or function
2294 * @cd: pointer to command details structure or NULL
2296 * Get the function (0x000A) or device (0x000B) capabilities description from
2297 * firmware and store it in the buffer.
2299 * If the cap_count pointer is not NULL, then it is set to the number of
2300 * capabilities firmware will report. Note that if the buffer size is too
2301 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2302 * cap_count will still be updated in this case. It is recommended that the
2303 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2304 * firmware could return) to avoid this.
2306 static enum ice_status
2307 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2308 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2310 struct ice_aqc_list_caps *cmd;
2311 struct ice_aq_desc desc;
2312 enum ice_status status;
2314 cmd = &desc.params.get_cap;
2316 if (opc != ice_aqc_opc_list_func_caps &&
2317 opc != ice_aqc_opc_list_dev_caps)
2318 return ICE_ERR_PARAM;
2320 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2321 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2324 *cap_count = LE32_TO_CPU(cmd->count);
2330 * ice_discover_dev_caps - Read and extract device capabilities
2331 * @hw: pointer to the hardware structure
2332 * @dev_caps: pointer to device capabilities structure
2334 * Read the device capabilities and extract them into the dev_caps structure
2337 static enum ice_status
2338 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2340 enum ice_status status;
2344 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2346 return ICE_ERR_NO_MEMORY;
2348 /* Although the driver doesn't know the number of capabilities the
2349 * device will return, we can simply send a 4KB buffer, the maximum
2350 * possible size that firmware can return.
2352 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2354 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2355 ice_aqc_opc_list_dev_caps, NULL);
2357 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2364 * ice_discover_func_caps - Read and extract function capabilities
2365 * @hw: pointer to the hardware structure
2366 * @func_caps: pointer to function capabilities structure
2368 * Read the function capabilities and extract them into the func_caps structure
2371 static enum ice_status
2372 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2374 enum ice_status status;
2378 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2380 return ICE_ERR_NO_MEMORY;
2382 /* Although the driver doesn't know the number of capabilities the
2383 * device will return, we can simply send a 4KB buffer, the maximum
2384 * possible size that firmware can return.
2386 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2388 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2389 ice_aqc_opc_list_func_caps, NULL);
2391 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2398 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2399 * @hw: pointer to the hardware structure
2401 void ice_set_safe_mode_caps(struct ice_hw *hw)
2403 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2404 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2405 struct ice_hw_common_caps cached_caps;
2408 /* cache some func_caps values that should be restored after memset */
2409 cached_caps = func_caps->common_cap;
2411 /* unset func capabilities */
2412 memset(func_caps, 0, sizeof(*func_caps));
2414 #define ICE_RESTORE_FUNC_CAP(name) \
2415 func_caps->common_cap.name = cached_caps.name
2417 /* restore cached values */
2418 ICE_RESTORE_FUNC_CAP(valid_functions);
2419 ICE_RESTORE_FUNC_CAP(txq_first_id);
2420 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2421 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2422 ICE_RESTORE_FUNC_CAP(max_mtu);
2423 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2425 /* one Tx and one Rx queue in safe mode */
2426 func_caps->common_cap.num_rxq = 1;
2427 func_caps->common_cap.num_txq = 1;
2429 /* two MSIX vectors, one for traffic and one for misc causes */
2430 func_caps->common_cap.num_msix_vectors = 2;
2431 func_caps->guar_num_vsi = 1;
2433 /* cache some dev_caps values that should be restored after memset */
2434 cached_caps = dev_caps->common_cap;
2435 num_funcs = dev_caps->num_funcs;
2437 /* unset dev capabilities */
2438 memset(dev_caps, 0, sizeof(*dev_caps));
2440 #define ICE_RESTORE_DEV_CAP(name) \
2441 dev_caps->common_cap.name = cached_caps.name
2443 /* restore cached values */
2444 ICE_RESTORE_DEV_CAP(valid_functions);
2445 ICE_RESTORE_DEV_CAP(txq_first_id);
2446 ICE_RESTORE_DEV_CAP(rxq_first_id);
2447 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2448 ICE_RESTORE_DEV_CAP(max_mtu);
2449 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2450 dev_caps->num_funcs = num_funcs;
2452 /* one Tx and one Rx queue per function in safe mode */
2453 dev_caps->common_cap.num_rxq = num_funcs;
2454 dev_caps->common_cap.num_txq = num_funcs;
2456 /* two MSIX vectors per function */
2457 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2461 * ice_get_caps - get info about the HW
2462 * @hw: pointer to the hardware structure
2464 enum ice_status ice_get_caps(struct ice_hw *hw)
2466 enum ice_status status;
2468 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2472 return ice_discover_func_caps(hw, &hw->func_caps);
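/* Usage sketch (illustrative, not part of the driver): a typical init path
 * calls ice_get_caps() once and then reads the cached copies kept in the HW
 * structure. The helper name below is hypothetical.
 */
static void example_log_caps(struct ice_hw *hw)
{
	if (ice_get_caps(hw) != ICE_SUCCESS)
		return;

	ice_debug(hw, ICE_DBG_INIT, "PF guaranteed VSIs = %d, device PFs = %d\n",
		  hw->func_caps.guar_num_vsi, hw->dev_caps.num_funcs);
}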
2476 * ice_aq_manage_mac_write - manage MAC address write command
2477 * @hw: pointer to the HW struct
2478 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2479 * @flags: flags to control write behavior
2480 * @cd: pointer to command details structure or NULL
2482 * This function is used to write a MAC address to the NVM (0x0108).
2485 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2486 struct ice_sq_cd *cd)
2488 struct ice_aqc_manage_mac_write *cmd;
2489 struct ice_aq_desc desc;
2491 cmd = &desc.params.mac_write;
2492 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2495 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
2497 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2501 * ice_aq_clear_pxe_mode
2502 * @hw: pointer to the HW struct
2504 * Tell the firmware that the driver is taking over from PXE (0x0110).
2506 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2508 struct ice_aq_desc desc;
2510 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2511 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2513 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2517 * ice_clear_pxe_mode - clear pxe operations mode
2518 * @hw: pointer to the HW struct
2520 * Make sure all PXE mode settings are cleared, including things
2521 * like descriptor fetch/write-back mode.
2523 void ice_clear_pxe_mode(struct ice_hw *hw)
2525 if (ice_check_sq_alive(hw, &hw->adminq))
2526 ice_aq_clear_pxe_mode(hw);
2530 * ice_aq_set_port_params - set physical port parameters.
2531 * @pi: pointer to the port info struct
2532 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2533 * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
2534 * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
2535 * @double_vlan: if set, double VLAN is enabled
2536 * @cd: pointer to command details structure or NULL
2538 * Set Physical port parameters (0x0203)
2541 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2542 bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2543 struct ice_sq_cd *cd)
2546 struct ice_aqc_set_port_params *cmd;
2547 struct ice_hw *hw = pi->hw;
2548 struct ice_aq_desc desc;
2551 cmd = &desc.params.set_port_params;
2553 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2554 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
2556 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
2558 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
2560 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2561 cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
2563 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
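/* Usage sketch (illustrative, not part of the driver): enable double VLAN
 * on a port while leaving bad-frame forwarding and short-packet padding
 * disabled. The wrapper name is hypothetical and the enum ice_status return
 * follows the convention of the other AQ wrappers in this file.
 */
static enum ice_status example_enable_double_vlan(struct ice_port_info *pi)
{
	/* bad_frame_vsi = 0, save_bad_pac = false, pad_short_pac = false,
	 * double_vlan = true, no command details
	 */
	return ice_aq_set_port_params(pi, 0, false, false, true, NULL);
}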
2567 * ice_get_link_speed_based_on_phy_type - returns link speed
2568 * @phy_type_low: lower part of phy_type
2569 * @phy_type_high: higher part of phy_type
2571 * This helper function will convert an entry in PHY type structure
2572 * [phy_type_low, phy_type_high] to its corresponding link speed.
2573 * Note: In the structure of [phy_type_low, phy_type_high], there should
2574 * be one bit set, as this function converts one PHY type to its speed.
2576 * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2577 * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2580 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2582 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2583 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2585 switch (phy_type_low) {
2586 case ICE_PHY_TYPE_LOW_100BASE_TX:
2587 case ICE_PHY_TYPE_LOW_100M_SGMII:
2588 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2590 case ICE_PHY_TYPE_LOW_1000BASE_T:
2591 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2592 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2593 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2594 case ICE_PHY_TYPE_LOW_1G_SGMII:
2595 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2597 case ICE_PHY_TYPE_LOW_2500BASE_T:
2598 case ICE_PHY_TYPE_LOW_2500BASE_X:
2599 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2600 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2602 case ICE_PHY_TYPE_LOW_5GBASE_T:
2603 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2604 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2606 case ICE_PHY_TYPE_LOW_10GBASE_T:
2607 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2608 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2609 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2610 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2611 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2612 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2613 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2615 case ICE_PHY_TYPE_LOW_25GBASE_T:
2616 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2617 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2618 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2619 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2620 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2621 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2622 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2623 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2624 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2625 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2626 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2628 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2629 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2630 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2631 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2632 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2633 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2634 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2636 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2637 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2638 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2639 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2640 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2641 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2642 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2643 case ICE_PHY_TYPE_LOW_50G_AUI2:
2644 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2645 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2646 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2647 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2648 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2649 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2650 case ICE_PHY_TYPE_LOW_50G_AUI1:
2651 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2653 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2654 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2655 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2656 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2657 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2658 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2659 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2660 case ICE_PHY_TYPE_LOW_100G_AUI4:
2661 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2662 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2663 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2664 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2665 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2666 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2669 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2673 switch (phy_type_high) {
2674 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2675 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2676 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2677 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2678 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2679 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2682 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2686 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2687 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2688 return ICE_AQ_LINK_SPEED_UNKNOWN;
2689 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2690 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2691 return ICE_AQ_LINK_SPEED_UNKNOWN;
2692 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2693 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2694 return speed_phy_type_low;
2696 return speed_phy_type_high;
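/* Worked example (illustrative only): a one-hot phy_type_low equal to
 * ICE_PHY_TYPE_LOW_25GBASE_SR resolves to ICE_AQ_LINK_SPEED_25GB, whereas
 * setting bits in both phy_type_low and phy_type_high at the same time
 * resolves to ICE_AQ_LINK_SPEED_UNKNOWN per the checks above.
 */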
2700 * ice_update_phy_type
2701 * @phy_type_low: pointer to the lower part of phy_type
2702 * @phy_type_high: pointer to the higher part of phy_type
2703 * @link_speeds_bitmap: targeted link speeds bitmap
2705 * Note: For the link_speeds_bitmap format, refer to
2706 * [ice_aqc_get_link_status->link_speed]. The caller may pass a
2707 * link_speeds_bitmap that includes multiple speeds.
2709 * Each entry in the [phy_type_low, phy_type_high] structure represents
2710 * a certain link speed. This helper function turns on the bits in
2711 * [phy_type_low, phy_type_high] that correspond to the speeds requested
2712 * in the link_speeds_bitmap input parameter.
2715 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2716 u16 link_speeds_bitmap)
2723 /* We first check with low part of phy_type */
2724 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2725 pt_low = BIT_ULL(index);
2726 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2728 if (link_speeds_bitmap & speed)
2729 *phy_type_low |= BIT_ULL(index);
2732 /* We then check with high part of phy_type */
2733 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2734 pt_high = BIT_ULL(index);
2735 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2737 if (link_speeds_bitmap & speed)
2738 *phy_type_high |= BIT_ULL(index);
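/* Usage sketch (illustrative, not part of the driver): build the PHY type
 * masks for a combined 10G + 25G request before programming the PHY. The
 * helper name is hypothetical.
 */
static void example_build_phy_types(u64 *phy_type_low, u64 *phy_type_high)
{
	*phy_type_low = 0;
	*phy_type_high = 0;
	ice_update_phy_type(phy_type_low, phy_type_high,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
}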
2743 * ice_aq_set_phy_cfg
2744 * @hw: pointer to the HW struct
2745 * @pi: port info structure of the interested logical port
2746 * @cfg: structure with PHY configuration data to be set
2747 * @cd: pointer to command details structure or NULL
2749 * Set the various PHY configuration parameters supported on the Port.
2750 * One or more of the Set PHY config parameters may be ignored in an MFP
2751 * mode as the PF may not have the privilege to set some of the PHY Config
2752 * parameters. This status will be indicated by the command response (0x0601).
2755 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2756 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2758 struct ice_aq_desc desc;
2759 enum ice_status status;
2762 return ICE_ERR_PARAM;
2764 /* Ensure that only valid bits of cfg->caps can be turned on. */
2765 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2766 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2769 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2772 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2773 desc.params.set_phy.lport_num = pi->lport;
2774 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2776 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2777 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2778 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2779 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2780 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2781 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2782 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2783 cfg->low_power_ctrl_an);
2784 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2785 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2786 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2789 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2791 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2792 status = ICE_SUCCESS;
2795 pi->phy.curr_user_phy_cfg = *cfg;
2801 * ice_update_link_info - update status of the HW network link
2802 * @pi: port info structure of the interested logical port
2804 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2806 struct ice_link_status *li;
2807 enum ice_status status;
2810 return ICE_ERR_PARAM;
2812 li = &pi->phy.link_info;
2814 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2818 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2819 struct ice_aqc_get_phy_caps_data *pcaps;
2823 pcaps = (struct ice_aqc_get_phy_caps_data *)
2824 ice_malloc(hw, sizeof(*pcaps));
2826 return ICE_ERR_NO_MEMORY;
2828 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2831 if (status == ICE_SUCCESS)
2832 ice_memcpy(li->module_type, &pcaps->module_type,
2833 sizeof(li->module_type),
2834 ICE_NONDMA_TO_NONDMA);
2836 ice_free(hw, pcaps);
2843 * ice_cache_phy_user_req
2844 * @pi: port information structure
2845 * @cache_data: PHY logging data
2846 * @cache_mode: PHY logging mode
2848 * Log the user request on (FC, FEC, SPEED) for later use.
2851 ice_cache_phy_user_req(struct ice_port_info *pi,
2852 struct ice_phy_cache_mode_data cache_data,
2853 enum ice_phy_cache_mode cache_mode)
2858 switch (cache_mode) {
2860 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2862 case ICE_SPEED_MODE:
2863 pi->phy.curr_user_speed_req =
2864 cache_data.data.curr_user_speed_req;
2867 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2875 * ice_caps_to_fc_mode
2876 * @caps: PHY capabilities
2878 * Convert PHY FC capabilities to ice FC mode
2880 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2882 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2883 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2886 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2887 return ICE_FC_TX_PAUSE;
2889 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2890 return ICE_FC_RX_PAUSE;
2896 * ice_caps_to_fec_mode
2897 * @caps: PHY capabilities
2898 * @fec_options: Link FEC options
2900 * Convert PHY FEC capabilities to ice FEC mode
2902 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2904 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2905 return ICE_FEC_AUTO;
2907 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2908 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2909 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2910 ICE_AQC_PHY_FEC_25G_KR_REQ))
2911 return ICE_FEC_BASER;
2913 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2914 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2915 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2918 return ICE_FEC_NONE;
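/* Summary (illustrative only): the auto-FEC ability bit takes precedence,
 * any BASE-R (KR / Clause 74) option maps to ICE_FEC_BASER, any RS
 * (528/544 / Clause 91) option maps to the RS FEC mode, and ICE_FEC_NONE
 * is the fallback when no FEC option is set.
 */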
2922 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2923 * @pi: port information structure
2924 * @cfg: PHY configuration data to set FC mode
2925 * @req_mode: FC mode to configure
2927 static enum ice_status
2928 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2929 enum ice_fc_mode req_mode)
2931 struct ice_phy_cache_mode_data cache_data;
2932 u8 pause_mask = 0x0;
2935 return ICE_ERR_BAD_PTR;
2940 struct ice_aqc_get_phy_caps_data *pcaps;
2941 enum ice_status status;
2943 pcaps = (struct ice_aqc_get_phy_caps_data *)
2944 ice_malloc(pi->hw, sizeof(*pcaps));
2946 return ICE_ERR_NO_MEMORY;
2948 /* Query the value of FC that both the NIC and attached media
2951 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2954 ice_free(pi->hw, pcaps);
2958 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2959 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2961 ice_free(pi->hw, pcaps);
2965 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2966 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2968 case ICE_FC_RX_PAUSE:
2969 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2971 case ICE_FC_TX_PAUSE:
2972 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2978 /* clear the old pause settings */
2979 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2980 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2982 /* set the new capabilities */
2983 cfg->caps |= pause_mask;
2985 /* Cache user FC request */
2986 cache_data.data.curr_user_fc_req = req_mode;
2987 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2994 * @pi: port information structure
2995 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2996 * @ena_auto_link_update: enable automatic link update
2998 * Set the requested flow control mode.
3001 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3003 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3004 struct ice_aqc_get_phy_caps_data *pcaps;
3005 enum ice_status status;
3008 if (!pi || !aq_failures)
3009 return ICE_ERR_BAD_PTR;
3014 pcaps = (struct ice_aqc_get_phy_caps_data *)
3015 ice_malloc(hw, sizeof(*pcaps));
3017 return ICE_ERR_NO_MEMORY;
3019 /* Get the current PHY config */
3020 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3024 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3028 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3030 /* Configure the set PHY data */
3031 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3033 if (status != ICE_ERR_BAD_PTR)
3034 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3039 /* If the capabilities have changed, then set the new config */
3040 if (cfg.caps != pcaps->caps) {
3041 int retry_count, retry_max = 10;
3043 /* Auto restart link so settings take effect */
3044 if (ena_auto_link_update)
3045 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3047 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3049 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3053 /* Update the link info
3054 * It sometimes takes a really long time for link to
3055 * come back from the atomic reset. Thus, we wait a
3058 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3059 status = ice_update_link_info(pi);
3061 if (status == ICE_SUCCESS)
3064 ice_msec_delay(100, true);
3068 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3072 ice_free(hw, pcaps);
3077 * ice_phy_caps_equals_cfg
3078 * @phy_caps: PHY capabilities
3079 * @phy_cfg: PHY configuration
3081 * Helper function to determine if PHY capabilities match PHY
3085 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3086 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3088 u8 caps_mask, cfg_mask;
3090 if (!phy_caps || !phy_cfg)
3093 /* These bits are not common between capabilities and configuration.
3094 * Do not use them to determine equality.
3096 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3097 ICE_AQC_PHY_EN_MOD_QUAL);
3098 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3100 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3101 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3102 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3103 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3104 phy_caps->eee_cap != phy_cfg->eee_cap ||
3105 phy_caps->eeer_value != phy_cfg->eeer_value ||
3106 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3113 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3114 * @pi: port information structure
3115 * @caps: PHY ability structure to copy data from
3116 * @cfg: PHY configuration structure to copy data to
3118 * Helper function to copy AQC PHY get ability data to PHY set configuration
3122 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3123 struct ice_aqc_get_phy_caps_data *caps,
3124 struct ice_aqc_set_phy_cfg_data *cfg)
3126 if (!pi || !caps || !cfg)
3129 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3130 cfg->phy_type_low = caps->phy_type_low;
3131 cfg->phy_type_high = caps->phy_type_high;
3132 cfg->caps = caps->caps;
3133 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3134 cfg->eee_cap = caps->eee_cap;
3135 cfg->eeer_value = caps->eeer_value;
3136 cfg->link_fec_opt = caps->link_fec_options;
3137 cfg->module_compliance_enforcement =
3138 caps->module_compliance_enforcement;
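/* Usage sketch (illustrative, not part of the driver): the common pattern
 * for reconfiguring the PHY is to query the active abilities, copy them into
 * a set-config structure, adjust the fields of interest and write the result
 * back. The helper name is hypothetical; the ice_aq_get_phy_caps() argument
 * order is assumed to match the calls made elsewhere in this file.
 */
static enum ice_status example_reapply_phy_cfg(struct ice_port_info *pi)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status == ICE_SUCCESS) {
		ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
		/* request an automatic link update when the config applies */
		cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
	}

	ice_free(hw, pcaps);
	return status;
}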
3142 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3143 * @pi: port information structure
3144 * @cfg: PHY configuration data to set FEC mode
3145 * @fec: FEC mode to configure
3148 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3149 enum ice_fec_mode fec)
3151 struct ice_aqc_get_phy_caps_data *pcaps;
3152 enum ice_status status = ICE_SUCCESS;
3156 return ICE_ERR_BAD_PTR;
3160 pcaps = (struct ice_aqc_get_phy_caps_data *)
3161 ice_malloc(hw, sizeof(*pcaps));
3163 return ICE_ERR_NO_MEMORY;
3165 status = ice_aq_get_phy_caps(pi, false,
3166 (ice_fw_supports_report_dflt_cfg(hw) ?
3167 ICE_AQC_REPORT_DFLT_CFG :
3168 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3173 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3174 cfg->link_fec_opt = pcaps->link_fec_options;
3178 /* Clear RS bits, AND in the BASE-R ability
3179 * bits, and OR in the BASE-R request bits.
3181 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3182 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3183 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3184 ICE_AQC_PHY_FEC_25G_KR_REQ;
3187 /* Clear BASE-R bits, AND in the RS ability
3188 * bits, and OR in the RS request bits.
3190 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3191 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3192 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3195 /* Clear all FEC option bits. */
3196 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3199 /* AND auto FEC bit, and all caps bits. */
3200 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3201 cfg->link_fec_opt |= pcaps->link_fec_options;
3204 status = ICE_ERR_PARAM;
3208 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
3209 !ice_fw_supports_report_dflt_cfg(pi->hw)) {
3210 struct ice_link_default_override_tlv tlv;
3212 if (ice_get_link_default_override(&tlv, pi))
3215 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3216 (tlv.options & ICE_LINK_OVERRIDE_EN))
3217 cfg->link_fec_opt = tlv.fec_options;
3221 ice_free(hw, pcaps);
3227 * ice_get_link_status - get status of the HW network link
3228 * @pi: port information structure
3229 * @link_up: pointer to bool (true/false = linkup/linkdown)
3231 * Variable link_up is true if link is up, false if link is down.
3232 * The variable link_up is invalid if the returned status is non-zero. As a
3233 * result of this call, link status reporting becomes enabled.
3235 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3237 struct ice_phy_info *phy_info;
3238 enum ice_status status = ICE_SUCCESS;
3240 if (!pi || !link_up)
3241 return ICE_ERR_PARAM;
3243 phy_info = &pi->phy;
3245 if (phy_info->get_link_info) {
3246 status = ice_update_link_info(pi);
3249 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3253 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
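/* Usage sketch (illustrative, not part of the driver): poll the link once
 * and log the result. The helper name is hypothetical.
 */
static void example_check_link(struct ice_port_info *pi)
{
	bool link_up = false;

	if (ice_get_link_status(pi, &link_up) != ICE_SUCCESS)
		return;

	ice_debug(pi->hw, ICE_DBG_LINK, "link is %s\n",
		  link_up ? "up" : "down");
}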
3259 * ice_aq_set_link_restart_an
3260 * @pi: pointer to the port information structure
3261 * @ena_link: if true: enable link, if false: disable link
3262 * @cd: pointer to command details structure or NULL
3264 * Sets up the link and restarts the Auto-Negotiation over the link.
3267 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3268 struct ice_sq_cd *cd)
3270 struct ice_aqc_restart_an *cmd;
3271 struct ice_aq_desc desc;
3273 cmd = &desc.params.restart_an;
3275 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3277 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3278 cmd->lport_num = pi->lport;
3280 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3282 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3284 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3288 * ice_aq_set_event_mask
3289 * @hw: pointer to the HW struct
3290 * @port_num: port number of the physical function
3291 * @mask: event mask to be set
3292 * @cd: pointer to command details structure or NULL
3294 * Set event mask (0x0613)
3297 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3298 struct ice_sq_cd *cd)
3300 struct ice_aqc_set_event_mask *cmd;
3301 struct ice_aq_desc desc;
3303 cmd = &desc.params.set_event_mask;
3305 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3307 cmd->lport_num = port_num;
3309 cmd->event_mask = CPU_TO_LE16(mask);
3310 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3314 * ice_aq_set_mac_loopback
3315 * @hw: pointer to the HW struct
3316 * @ena_lpbk: Enable or Disable loopback
3317 * @cd: pointer to command details structure or NULL
3319 * Enable/disable loopback on a given port
3322 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3324 struct ice_aqc_set_mac_lb *cmd;
3325 struct ice_aq_desc desc;
3327 cmd = &desc.params.set_mac_lb;
3329 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3331 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3333 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3337 * ice_aq_set_port_id_led
3338 * @pi: pointer to the port information
3339 * @is_orig_mode: is this LED set to original mode (by the net-list)
3340 * @cd: pointer to command details structure or NULL
3342 * Set LED value for the given port (0x06e9)
3345 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3346 struct ice_sq_cd *cd)
3348 struct ice_aqc_set_port_id_led *cmd;
3349 struct ice_hw *hw = pi->hw;
3350 struct ice_aq_desc desc;
3352 cmd = &desc.params.set_port_id_led;
3354 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3357 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3359 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3361 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
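/* Usage sketch (illustrative, not part of the driver): blink the port
 * identification LED; calling the same wrapper with is_orig_mode = true
 * restores the netlist-defined behavior. The helper name is hypothetical and
 * the enum ice_status return follows the other AQ wrappers in this file.
 */
static enum ice_status example_blink_port_led(struct ice_port_info *pi)
{
	return ice_aq_set_port_id_led(pi, false, NULL);
}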
3366 * @hw: pointer to the HW struct
3367 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3368 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3369 * @mem_addr: I2C offset. Lower 8 bits hold the address; upper 8 bits are zero padding.
3371 * @set_page: set or ignore the page
3372 * @data: pointer to data buffer to be read/written to the I2C device.
3373 * @length: 1-16 for read, 1 for write.
3374 * @write: 0 for read, 1 for write.
3375 * @cd: pointer to command details structure or NULL
3377 * Read/Write SFF EEPROM (0x06EE)
3380 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3381 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3382 bool write, struct ice_sq_cd *cd)
3384 struct ice_aqc_sff_eeprom *cmd;
3385 struct ice_aq_desc desc;
3386 enum ice_status status;
3388 if (!data || (mem_addr & 0xff00))
3389 return ICE_ERR_PARAM;
3391 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3392 cmd = &desc.params.read_write_sff_param;
3393 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3394 cmd->lport_num = (u8)(lport & 0xff);
3395 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3396 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3397 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3399 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3400 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3401 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3402 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3404 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3406 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
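/* Usage sketch (illustrative, not part of the driver): read the first 16
 * bytes of a module's 0xA0 EEPROM page on the default topology address. The
 * helper name is hypothetical and the enum ice_status return follows the
 * other AQ wrappers in this file.
 */
static enum ice_status
example_read_sff_id(struct ice_hw *hw, u16 lport, u8 *buf)
{
	/* bus_addr = 0xA0, mem_addr = 0, page = 0, set_page = 0,
	 * 16-byte read (write = false)
	 */
	return ice_aq_sff_eeprom(hw, lport, 0xA0, 0, 0, 0, buf, 16,
				 false, NULL);
}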
3411 * ice_aq_prog_topo_dev_nvm
3412 * @hw: pointer to the hardware structure
3413 * @topo_params: pointer to structure storing topology parameters for a device
3414 * @cd: pointer to command details structure or NULL
3416 * Program Topology Device NVM (0x06F2)
3420 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
3421 struct ice_aqc_link_topo_params *topo_params,
3422 struct ice_sq_cd *cd)
3424 struct ice_aqc_prog_topo_dev_nvm *cmd;
3425 struct ice_aq_desc desc;
3427 cmd = &desc.params.prog_topo_dev_nvm;
3429 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);
3431 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3432 ICE_NONDMA_TO_NONDMA);
3434 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3438 * ice_aq_read_topo_dev_nvm
3439 * @hw: pointer to the hardware structure
3440 * @topo_params: pointer to structure storing topology parameters for a device
3441 * @start_address: byte offset in the topology device NVM
3442 * @data: pointer to data buffer
3443 * @data_size: number of bytes to be read from the topology device NVM
3444 * @cd: pointer to command details structure or NULL
3445 * Read Topology Device NVM (0x06F3)
3449 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
3450 struct ice_aqc_link_topo_params *topo_params,
3451 u32 start_address, u8 *data, u8 data_size,
3452 struct ice_sq_cd *cd)
3454 struct ice_aqc_read_topo_dev_nvm *cmd;
3455 struct ice_aq_desc desc;
3456 enum ice_status status;
3458 if (!data || data_size == 0 ||
3459 data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
3460 return ICE_ERR_PARAM;
3462 cmd = &desc.params.read_topo_dev_nvm;
3464 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
3466 desc.datalen = data_size;
3467 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
3468 ICE_NONDMA_TO_NONDMA);
3469 cmd->start_address = CPU_TO_LE32(start_address);
3471 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3475 ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
3481 * __ice_aq_get_set_rss_lut
3482 * @hw: pointer to the hardware structure
3483 * @params: RSS LUT parameters
3484 * @set: set true to set the table, false to get the table
3486 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
3488 static enum ice_status
3489 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3491 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3492 struct ice_aqc_get_set_rss_lut *cmd_resp;
3493 struct ice_aq_desc desc;
3494 enum ice_status status;
3498 return ICE_ERR_PARAM;
3500 vsi_handle = params->vsi_handle;
3503 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3504 return ICE_ERR_PARAM;
3506 lut_size = params->lut_size;
3507 lut_type = params->lut_type;
3508 glob_lut_idx = params->global_lut_id;
3509 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3511 cmd_resp = &desc.params.get_set_rss_lut;
3514 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3515 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3517 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3520 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3521 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3522 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3523 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3526 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3527 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3528 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3529 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3530 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3533 status = ICE_ERR_PARAM;
3534 goto ice_aq_get_set_rss_lut_exit;
3537 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3538 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3539 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3542 goto ice_aq_get_set_rss_lut_send;
3543 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3545 goto ice_aq_get_set_rss_lut_send;
3547 goto ice_aq_get_set_rss_lut_send;
3550 /* LUT size is only valid for Global and PF table types */
3552 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3553 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3554 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3555 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3557 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3558 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3559 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3560 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3562 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3563 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3564 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3565 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3566 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3571 status = ICE_ERR_PARAM;
3572 goto ice_aq_get_set_rss_lut_exit;
3575 ice_aq_get_set_rss_lut_send:
3576 cmd_resp->flags = CPU_TO_LE16(flags);
3577 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3579 ice_aq_get_set_rss_lut_exit:
3584 * ice_aq_get_rss_lut
3585 * @hw: pointer to the hardware structure
3586 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3588 * get the RSS lookup table, PF or VSI type
3591 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3593 return __ice_aq_get_set_rss_lut(hw, get_params, false);
3597 * ice_aq_set_rss_lut
3598 * @hw: pointer to the hardware structure
3599 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3601 * set the RSS lookup table, PF or VSI type
3604 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3606 return __ice_aq_get_set_rss_lut(hw, set_params, true);
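/* Usage sketch (illustrative, not part of the driver): program a 512-entry
 * PF-type LUT for a VSI. The helper name is hypothetical, the lut buffer is
 * assumed to hold 512 bytes, and the params->lut member name is inferred
 * from the NULL check in the common helper above.
 */
static enum ice_status
example_set_pf_lut(struct ice_hw *hw, u16 vsi_handle, u8 *lut)
{
	struct ice_aq_get_set_rss_lut_params params = { 0 };

	params.vsi_handle = vsi_handle;
	params.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512;
	params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
	params.lut = lut;

	return ice_aq_set_rss_lut(hw, &params);
}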
3610 * __ice_aq_get_set_rss_key
3611 * @hw: pointer to the HW struct
3612 * @vsi_id: VSI FW index
3613 * @key: pointer to key info struct
3614 * @set: set true to set the key, false to get the key
3616 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3619 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3620 struct ice_aqc_get_set_rss_keys *key,
3623 struct ice_aqc_get_set_rss_key *cmd_resp;
3624 u16 key_size = sizeof(*key);
3625 struct ice_aq_desc desc;
3627 cmd_resp = &desc.params.get_set_rss_key;
3630 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3631 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3633 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3636 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3637 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3638 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3639 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3641 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3645 * ice_aq_get_rss_key
3646 * @hw: pointer to the HW struct
3647 * @vsi_handle: software VSI handle
3648 * @key: pointer to key info struct
3650 * get the RSS key per VSI
3653 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3654 struct ice_aqc_get_set_rss_keys *key)
3656 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3657 return ICE_ERR_PARAM;
3659 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3664 * ice_aq_set_rss_key
3665 * @hw: pointer to the HW struct
3666 * @vsi_handle: software VSI handle
3667 * @keys: pointer to key info struct
3669 * set the RSS key per VSI
3672 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3673 struct ice_aqc_get_set_rss_keys *keys)
3675 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3676 return ICE_ERR_PARAM;
3678 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3683 * ice_aq_add_lan_txq
3684 * @hw: pointer to the hardware structure
3685 * @num_qgrps: Number of added queue groups
3686 * @qg_list: list of queue groups to be added
3687 * @buf_size: size of buffer for indirect command
3688 * @cd: pointer to command details structure or NULL
3690 * Add Tx LAN queue (0x0C30)
3693 * Prior to calling add Tx LAN queue:
3694 * Initialize the following as part of the Tx queue context:
3695 * completion queue ID (if the queue uses a completion queue), quanta
3696 * profile, cache profile and packet shaper profile.
3698 * After the add Tx LAN queue AQ command completes:
3699 * Interrupts should be associated with the specific queues;
3700 * association of a Tx queue with a doorbell queue is not part of the Add LAN Tx queue
3704 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3705 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3706 struct ice_sq_cd *cd)
3708 struct ice_aqc_add_tx_qgrp *list;
3709 struct ice_aqc_add_txqs *cmd;
3710 struct ice_aq_desc desc;
3711 u16 i, sum_size = 0;
3713 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3715 cmd = &desc.params.add_txqs;
3717 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3720 return ICE_ERR_PARAM;
3722 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3723 return ICE_ERR_PARAM;
3725 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3726 sum_size += ice_struct_size(list, txqs, list->num_txqs);
3727 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3731 if (buf_size != sum_size)
3732 return ICE_ERR_PARAM;
3734 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3736 cmd->num_qgrps = num_qgrps;
3738 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3742 * ice_aq_dis_lan_txq
3743 * @hw: pointer to the hardware structure
3744 * @num_qgrps: number of groups in the list
3745 * @qg_list: the list of groups to disable
3746 * @buf_size: the total size of the qg_list buffer in bytes
3747 * @rst_src: if called due to reset, specifies the reset source
3748 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3749 * @cd: pointer to command details structure or NULL
3751 * Disable LAN Tx queue (0x0C31)
3753 static enum ice_status
3754 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3755 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3756 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3757 struct ice_sq_cd *cd)
3759 struct ice_aqc_dis_txq_item *item;
3760 struct ice_aqc_dis_txqs *cmd;
3761 struct ice_aq_desc desc;
3762 enum ice_status status;
3765 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3766 cmd = &desc.params.dis_txqs;
3767 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3769 /* qg_list can be NULL only in VM/VF reset flow */
3770 if (!qg_list && !rst_src)
3771 return ICE_ERR_PARAM;
3773 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3774 return ICE_ERR_PARAM;
3776 cmd->num_entries = num_qgrps;
3778 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3779 ICE_AQC_Q_DIS_TIMEOUT_M);
3783 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3784 cmd->vmvf_and_timeout |=
3785 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3792 /* flush pipe on time out */
3793 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3794 /* If no queue group info, we are in a reset flow. Issue the AQ */
3798 /* set RD bit to indicate that command buffer is provided by the driver
3799 * and it needs to be read by the firmware
3801 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3803 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3804 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
3806 /* If the num of queues is even, add 2 bytes of padding */
3807 if ((item->num_qs % 2) == 0)
3812 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3816 return ICE_ERR_PARAM;
3819 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3822 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3823 vmvf_num, hw->adminq.sq_last_status);
3825 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3826 LE16_TO_CPU(qg_list[0].q_id[0]),
3827 hw->adminq.sq_last_status);
3833 * ice_aq_move_recfg_lan_txq
3834 * @hw: pointer to the hardware structure
3835 * @num_qs: number of queues to move/reconfigure
3836 * @is_move: true if this operation involves node movement
3837 * @is_tc_change: true if this operation involves a TC change
3838 * @subseq_call: true if this operation is a subsequent call
3839 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3840 * @timeout: timeout in units of 100 usec (valid values 0-50)
3841 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3842 * @buf: struct containing src/dest TEID and per-queue info
3843 * @buf_size: size of buffer for indirect command
3844 * @txqs_moved: out param, number of queues successfully moved
3845 * @cd: pointer to command details structure or NULL
3847 * Move / Reconfigure Tx LAN queues (0x0C32)
3850 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3851 bool is_tc_change, bool subseq_call, bool flush_pipe,
3852 u8 timeout, u32 *blocked_cgds,
3853 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3854 u8 *txqs_moved, struct ice_sq_cd *cd)
3856 struct ice_aqc_move_txqs *cmd;
3857 struct ice_aq_desc desc;
3858 enum ice_status status;
3860 cmd = &desc.params.move_txqs;
3861 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3863 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3864 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3865 return ICE_ERR_PARAM;
3867 if (is_tc_change && !flush_pipe && !blocked_cgds)
3868 return ICE_ERR_PARAM;
3870 if (!is_move && !is_tc_change)
3871 return ICE_ERR_PARAM;
3873 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3876 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3879 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3882 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3885 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3887 cmd->num_qs = num_qs;
3888 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3889 ICE_AQC_Q_CMD_TIMEOUT_M);
3891 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3893 if (!status && txqs_moved)
3894 *txqs_moved = cmd->num_qs;
3896 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3897 is_tc_change && !flush_pipe)
3898 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3903 /* End of FW Admin Queue command wrappers */
3906 * ice_write_byte - write a byte to a packed context structure
3907 * @src_ctx: the context structure to read from
3908 * @dest_ctx: the context to be written to
3909 * @ce_info: a description of the struct to be filled
3912 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3914 u8 src_byte, dest_byte, mask;
3918 /* copy from the next struct field */
3919 from = src_ctx + ce_info->offset;
3921 /* prepare the bits and mask */
3922 shift_width = ce_info->lsb % 8;
3923 mask = (u8)(BIT(ce_info->width) - 1);
3928 /* shift to correct alignment */
3929 mask <<= shift_width;
3930 src_byte <<= shift_width;
3932 /* get the current bits from the target bit string */
3933 dest = dest_ctx + (ce_info->lsb / 8);
3935 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3937 dest_byte &= ~mask; /* get the bits not changing */
3938 dest_byte |= src_byte; /* add in the new bits */
3940 /* put it all back */
3941 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
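/* Worked example (illustrative only): with ce_info->lsb = 5 and
 * ce_info->width = 3, shift_width is 5 and the mask becomes 0x7 << 5 = 0xE0,
 * so a source value of 0x5 lands in bits 7:5 of the destination byte while
 * bits 4:0 keep their previous contents.
 */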
3945 * ice_write_word - write a word to a packed context structure
3946 * @src_ctx: the context structure to read from
3947 * @dest_ctx: the context to be written to
3948 * @ce_info: a description of the struct to be filled
3951 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3958 /* copy from the next struct field */
3959 from = src_ctx + ce_info->offset;
3961 /* prepare the bits and mask */
3962 shift_width = ce_info->lsb % 8;
3963 mask = BIT(ce_info->width) - 1;
3965 /* don't swizzle the bits until after the mask because the mask bits
3966 * will be in a different bit position on big endian machines
3968 src_word = *(u16 *)from;
3971 /* shift to correct alignment */
3972 mask <<= shift_width;
3973 src_word <<= shift_width;
3975 /* get the current bits from the target bit string */
3976 dest = dest_ctx + (ce_info->lsb / 8);
3978 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3980 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
3981 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
3983 /* put it all back */
3984 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3988 * ice_write_dword - write a dword to a packed context structure
3989 * @src_ctx: the context structure to read from
3990 * @dest_ctx: the context to be written to
3991 * @ce_info: a description of the struct to be filled
3994 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3996 u32 src_dword, mask;
4001 /* copy from the next struct field */
4002 from = src_ctx + ce_info->offset;
4004 /* prepare the bits and mask */
4005 shift_width = ce_info->lsb % 8;
    /* if the field width is exactly 32 on an x86 machine, then the shift
     * operation will not work because the SHL instruction's count is masked
     * to 5 bits so the shift will do nothing
     */
    if (ce_info->width < 32)
        mask = BIT(ce_info->width) - 1;
    else
        mask = (u32)~0;
4016 /* don't swizzle the bits until after the mask because the mask bits
4017 * will be in a different bit position on big endian machines
    src_dword = *(u32 *)from;
    src_dword &= mask;
4022 /* shift to correct alignment */
4023 mask <<= shift_width;
4024 src_dword <<= shift_width;
4026 /* get the current bits from the target bit string */
4027 dest = dest_ctx + (ce_info->lsb / 8);
4029 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
4031 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
4032 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
4034 /* put it all back */
4035 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4039 * ice_write_qword - write a qword to a packed context structure
4040 * @src_ctx: the context structure to read from
4041 * @dest_ctx: the context to be written to
4042 * @ce_info: a description of the struct to be filled
4045 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4047 u64 src_qword, mask;
4052 /* copy from the next struct field */
4053 from = src_ctx + ce_info->offset;
4055 /* prepare the bits and mask */
4056 shift_width = ce_info->lsb % 8;
    /* if the field width is exactly 64 on an x86 machine, then the shift
     * operation will not work because the SHL instruction's count is masked
     * to 6 bits so the shift will do nothing
     */
    if (ce_info->width < 64)
        mask = BIT_ULL(ce_info->width) - 1;
    else
        mask = (u64)~0;
4067 /* don't swizzle the bits until after the mask because the mask bits
4068 * will be in a different bit position on big endian machines
    src_qword = *(u64 *)from;
    src_qword &= mask;
4073 /* shift to correct alignment */
4074 mask <<= shift_width;
4075 src_qword <<= shift_width;
4077 /* get the current bits from the target bit string */
4078 dest = dest_ctx + (ce_info->lsb / 8);
4080 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4082 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
4083 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
4085 /* put it all back */
4086 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4090 * ice_set_ctx - set context bits in packed structure
4091 * @hw: pointer to the hardware structure
4092 * @src_ctx: pointer to a generic non-packed context structure
4093 * @dest_ctx: pointer to memory for the packed structure
4094 * @ce_info: a description of the structure to be transformed
4097 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4098 const struct ice_ctx_ele *ce_info)
{
    int f;

    for (f = 0; ce_info[f].width; f++) {
        /* We have to deal with each element of the FW response
         * using the correct size so that we are correct regardless
         * of the endianness of the machine.
         */
        if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
            ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
                      f, ce_info[f].width, ce_info[f].size_of);
            continue;
        }
        switch (ce_info[f].size_of) {
        case sizeof(u8):
            ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
            break;
        case sizeof(u16):
            ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
            break;
        case sizeof(u32):
            ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
            break;
        case sizeof(u64):
            ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
            break;
        default:
            return ICE_ERR_INVAL_SIZE;
        }
    }

    return ICE_SUCCESS;
}
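/*
 * Usage sketch for ice_set_ctx() (hypothetical structure and field layout;
 * real callers describe HW contexts such as queue contexts with
 * driver-provided ice_ctx_ele tables): each entry names the source field,
 * its storage size, its width in bits and its lsb position in the packed
 * destination, and a zero-width entry terminates the table.
 *
 *    struct example_ctx {
 *        u16 head;    // 13-bit head pointer
 *        u8 ena;      // 1-bit enable flag
 *    };
 *
 *    static const struct ice_ctx_ele example_ctx_info[] = {
 *        { .offset = offsetof(struct example_ctx, head),
 *          .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *        { .offset = offsetof(struct example_ctx, ena),
 *          .size_of = sizeof(u8), .width = 1, .lsb = 13 },
 *        { .width = 0 }    // terminator
 *    };
 *
 *    status = ice_set_ctx(hw, (u8 *)&unpacked, packed_buf, example_ctx_info);
 */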
4134 * ice_read_byte - read context byte into struct
4135 * @src_ctx: the context structure to read from
4136 * @dest_ctx: the context to be written to
4137 * @ce_info: a description of the struct to be filled
4140 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4146 /* prepare the bits and mask */
4147 shift_width = ce_info->lsb % 8;
4148 mask = (u8)(BIT(ce_info->width) - 1);
4150 /* shift to correct alignment */
4151 mask <<= shift_width;
4153 /* get the current bits from the src bit string */
4154 src = src_ctx + (ce_info->lsb / 8);
4156 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
    dest_byte &= mask;
4160 dest_byte >>= shift_width;
4162 /* get the address from the struct field */
4163 target = dest_ctx + ce_info->offset;
4165 /* put it back in the struct */
4166 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4170 * ice_read_word - read context word into struct
4171 * @src_ctx: the context structure to read from
4172 * @dest_ctx: the context to be written to
4173 * @ce_info: a description of the struct to be filled
4176 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4178 u16 dest_word, mask;
4183 /* prepare the bits and mask */
4184 shift_width = ce_info->lsb % 8;
4185 mask = BIT(ce_info->width) - 1;
4187 /* shift to correct alignment */
4188 mask <<= shift_width;
4190 /* get the current bits from the src bit string */
4191 src = src_ctx + (ce_info->lsb / 8);
4193 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4195 /* the data in the memory is stored as little endian so mask it
    src_word &= CPU_TO_LE16(mask);
4200 /* get the data back into host order before shifting */
4201 dest_word = LE16_TO_CPU(src_word);
4203 dest_word >>= shift_width;
4205 /* get the address from the struct field */
4206 target = dest_ctx + ce_info->offset;
4208 /* put it back in the struct */
4209 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4213 * ice_read_dword - read context dword into struct
4214 * @src_ctx: the context structure to read from
4215 * @dest_ctx: the context to be written to
4216 * @ce_info: a description of the struct to be filled
4219 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4221 u32 dest_dword, mask;
4226 /* prepare the bits and mask */
4227 shift_width = ce_info->lsb % 8;
    /* if the field width is exactly 32 on an x86 machine, then the shift
     * operation will not work because the SHL instruction's count is masked
     * to 5 bits so the shift will do nothing
     */
    if (ce_info->width < 32)
        mask = BIT(ce_info->width) - 1;
    else
        mask = (u32)~0;
4238 /* shift to correct alignment */
4239 mask <<= shift_width;
4241 /* get the current bits from the src bit string */
4242 src = src_ctx + (ce_info->lsb / 8);
4244 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4246 /* the data in the memory is stored as little endian so mask it
    src_dword &= CPU_TO_LE32(mask);
4251 /* get the data back into host order before shifting */
4252 dest_dword = LE32_TO_CPU(src_dword);
4254 dest_dword >>= shift_width;
4256 /* get the address from the struct field */
4257 target = dest_ctx + ce_info->offset;
4259 /* put it back in the struct */
4260 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4264 * ice_read_qword - read context qword into struct
4265 * @src_ctx: the context structure to read from
4266 * @dest_ctx: the context to be written to
4267 * @ce_info: a description of the struct to be filled
4270 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4272 u64 dest_qword, mask;
4277 /* prepare the bits and mask */
4278 shift_width = ce_info->lsb % 8;
    /* if the field width is exactly 64 on an x86 machine, then the shift
     * operation will not work because the SHL instruction's count is masked
     * to 6 bits so the shift will do nothing
     */
    if (ce_info->width < 64)
        mask = BIT_ULL(ce_info->width) - 1;
    else
        mask = (u64)~0;
4289 /* shift to correct alignment */
4290 mask <<= shift_width;
4292 /* get the current bits from the src bit string */
4293 src = src_ctx + (ce_info->lsb / 8);
4295 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4297 /* the data in the memory is stored as little endian so mask it
    src_qword &= CPU_TO_LE64(mask);
4302 /* get the data back into host order before shifting */
4303 dest_qword = LE64_TO_CPU(src_qword);
4305 dest_qword >>= shift_width;
4307 /* get the address from the struct field */
4308 target = dest_ctx + ce_info->offset;
4310 /* put it back in the struct */
4311 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4315 * ice_get_ctx - extract context bits from a packed structure
4316 * @src_ctx: pointer to a generic packed context structure
4317 * @dest_ctx: pointer to a generic non-packed context structure
4318 * @ce_info: a description of the structure to be read from
4321 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
    int f;

    for (f = 0; ce_info[f].width; f++) {
        switch (ce_info[f].size_of) {
        case sizeof(u8):
            ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
            break;
        case sizeof(u16):
            ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
            break;
        case sizeof(u32):
            ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
            break;
        case sizeof(u64):
            ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
            break;
        default:
            /* nothing to do, just keep going */
            break;
        }
    }

    return ICE_SUCCESS;
}
4349 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4350 * @hw: pointer to the HW struct
4351 * @vsi_handle: software VSI handle
4353 * @q_handle: software queue handle
4356 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4358 struct ice_vsi_ctx *vsi;
4359 struct ice_q_ctx *q_ctx;
    vsi = ice_get_vsi_ctx(hw, vsi_handle);
    if (!vsi)
        return NULL;
    if (q_handle >= vsi->num_lan_q_entries[tc])
        return NULL;
    if (!vsi->lan_q_ctx[tc])
        return NULL;
4369 return &q_ctx[q_handle];
4374 * @pi: port information structure
4375 * @vsi_handle: software VSI handle
4377 * @q_handle: software queue handle
4378 * @num_qgrps: Number of added queue groups
4379 * @buf: list of queue groups to be added
4380 * @buf_size: size of buffer for indirect command
4381 * @cd: pointer to command details structure or NULL
4383 * This function adds one LAN queue
4386 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4387 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4388 struct ice_sq_cd *cd)
4390 struct ice_aqc_txsched_elem_data node = { 0 };
4391 struct ice_sched_node *parent;
4392 struct ice_q_ctx *q_ctx;
    enum ice_status status;
    struct ice_hw *hw;

    if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
        return ICE_ERR_CFG;

    if (num_qgrps > 1 || buf->num_txqs > 1)
        return ICE_ERR_MAX_LIMIT;

    hw = pi->hw;

    if (!ice_is_vsi_valid(hw, vsi_handle))
4405 return ICE_ERR_PARAM;
4407 ice_acquire_lock(&pi->sched_lock);
    q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
    if (!q_ctx) {
        ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
                  q_handle);
        status = ICE_ERR_PARAM;
        goto ena_txq_exit;
    }

    /* find a parent node */
    parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
                                        ICE_SCHED_NODE_OWNER_LAN);
    if (!parent) {
        status = ICE_ERR_PARAM;
        goto ena_txq_exit;
    }
4425 buf->parent_teid = parent->info.node_teid;
4426 node.parent_teid = parent->info.node_teid;
4427 /* Mark that the values in the "generic" section as valid. The default
4428 * value in the "generic" section is zero. This means that :
4429 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4430 * - 0 priority among siblings, indicated by Bit 1-3.
4431 * - WFQ, indicated by Bit 4.
4432 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4434 * - Bit 7 is reserved.
4435 * Without setting the generic section as valid in valid_sections, the
4436 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4438 buf->txqs[0].info.valid_sections =
4439 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4440 ICE_AQC_ELEM_VALID_EIR;
4441 buf->txqs[0].info.generic = 0;
4442 buf->txqs[0].info.cir_bw.bw_profile_idx =
4443 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4444 buf->txqs[0].info.cir_bw.bw_alloc =
4445 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4446 buf->txqs[0].info.eir_bw.bw_profile_idx =
4447 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4448 buf->txqs[0].info.eir_bw.bw_alloc =
4449 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4451 /* add the LAN queue */
4452 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4453 if (status != ICE_SUCCESS) {
4454 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4455 LE16_TO_CPU(buf->txqs[0].txq_id),
                  hw->adminq.sq_last_status);
        goto ena_txq_exit;
    }
4460 node.node_teid = buf->txqs[0].q_teid;
4461 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4462 q_ctx->q_handle = q_handle;
4463 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
    /* add a leaf node into scheduler tree queue layer */
    status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
    if (!status)
        status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
    ice_release_lock(&pi->sched_lock);
    return status;
}
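/*
 * Caller-side sketch (hypothetical handles and IDs): enabling one LAN Tx
 * queue on TC 0. The caller is expected to have packed the Tx LAN queue
 * context into the per-queue area of the buffer beforehand; only the pieces
 * visible in this file are shown here.
 *
 *    struct ice_aqc_add_tx_qgrp *qg;
 *    u16 qg_size = ice_struct_size(qg, txqs, 1);
 *
 *    qg = (struct ice_aqc_add_tx_qgrp *)ice_malloc(hw, qg_size);
 *    if (!qg)
 *        return ICE_ERR_NO_MEMORY;
 *    qg->num_txqs = 1;
 *    qg->txqs[0].txq_id = CPU_TO_LE16(txq_id);
 *    status = ice_ena_vsi_txq(pi, vsi_handle, 0, q_handle, 1, qg, qg_size,
 *                             NULL);
 *    ice_free(hw, qg);
 */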
4477 * @pi: port information structure
4478 * @vsi_handle: software VSI handle
4480 * @num_queues: number of queues
4481 * @q_handles: pointer to software queue handle array
4482 * @q_ids: pointer to the q_id array
4483 * @q_teids: pointer to queue node teids
4484 * @rst_src: if called due to reset, specifies the reset source
4485 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4486 * @cd: pointer to command details structure or NULL
4488 * This function removes queues and their corresponding nodes in SW DB
4491 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4492 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4493 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4494 struct ice_sq_cd *cd)
{
    enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
    struct ice_aqc_dis_txq_item *qg_list;
    struct ice_q_ctx *q_ctx;
    struct ice_hw *hw;
    u16 i, buf_size;

    if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
        return ICE_ERR_CFG;

    hw = pi->hw;
    if (!num_queues) {
        /* if queue is disabled already yet the disable queue command
         * has to be sent to complete the VF reset, then call
         * ice_aq_dis_lan_txq without any queue information
         */
        if (rst_src)
            return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
                                      vmvf_num, NULL);
        return ICE_ERR_CFG;
    }
4518 buf_size = ice_struct_size(qg_list, q_id, 1);
4519 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
    if (!qg_list)
        return ICE_ERR_NO_MEMORY;
4523 ice_acquire_lock(&pi->sched_lock);
4525 for (i = 0; i < num_queues; i++) {
4526 struct ice_sched_node *node;
        node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
        if (!node)
            continue;
        q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
        if (!q_ctx) {
            ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
                      q_handles[i]);
            continue;
        }
        if (q_ctx->q_handle != q_handles[i]) {
            ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
                      q_ctx->q_handle, q_handles[i]);
            continue;
        }
        qg_list->parent_teid = node->info.parent_teid;
4543 qg_list->num_qs = 1;
4544 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
        status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
                                    vmvf_num, cd);
        if (status != ICE_SUCCESS)
            break;
4550 ice_free_sched_node(pi, node);
4551 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4553 ice_release_lock(&pi->sched_lock);
    ice_free(hw, qg_list);
    return status;
}
4559 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4560 * @pi: port information structure
4561 * @vsi_handle: software VSI handle
4562 * @tc_bitmap: TC bitmap
4563 * @maxqs: max queues array per TC
4564 * @owner: LAN or RDMA
4566 * This function adds/updates the VSI queues per TC.
4568 static enum ice_status
4569 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4570 u16 *maxqs, u8 owner)
{
    enum ice_status status = ICE_SUCCESS;
    u8 i;

    if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
        return ICE_ERR_CFG;

    if (!ice_is_vsi_valid(pi->hw, vsi_handle))
        return ICE_ERR_PARAM;
4581 ice_acquire_lock(&pi->sched_lock);
    ice_for_each_traffic_class(i) {
        /* configuration is possible only if TC node is present */
        if (!ice_sched_get_tc_node(pi, i))
            continue;

        status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
                                   ice_is_tc_ena(tc_bitmap, i));
        if (status)
            break;
    }

    ice_release_lock(&pi->sched_lock);
    return status;
}
4599 * ice_cfg_vsi_lan - configure VSI LAN queues
4600 * @pi: port information structure
4601 * @vsi_handle: software VSI handle
4602 * @tc_bitmap: TC bitmap
4603 * @max_lanqs: max LAN queues array per TC
4605 * This function adds/updates the VSI LAN queues per TC.
4608 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4611 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4612 ICE_SCHED_NODE_OWNER_LAN);
4616 * ice_is_main_vsi - checks whether the VSI is main VSI
4617 * @hw: pointer to the HW struct
4618 * @vsi_handle: VSI handle
4620 * Checks whether the VSI is the main VSI (the first PF VSI created on
4623 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
4625 return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
4629 * ice_replay_pre_init - replay pre initialization
4630 * @hw: pointer to the HW struct
4631 * @sw: pointer to switch info struct for which function initializes filters
4633 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4635 static enum ice_status
4636 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
4638 enum ice_status status;
4641 /* Delete old entries from replay filter list head if there is any */
4642 ice_rm_sw_replay_rule_info(hw, sw);
4643 /* In start of replay, move entries into replay_rules list, it
4644 * will allow adding rules entries back to filt_rules list,
4645 * which is operational list.
4647 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
4648 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
4649 &sw->recp_list[i].filt_replay_rules);
4650 ice_sched_replay_agg_vsi_preinit(hw);
4652 status = ice_sched_replay_root_node_bw(hw->port_info);
4656 return ice_sched_replay_tc_node_bw(hw->port_info);
4660 * ice_replay_vsi - replay VSI configuration
4661 * @hw: pointer to the HW struct
4662 * @vsi_handle: driver VSI handle
4664 * Restore all VSI configuration after reset. It is required to call this
4665 * function with main VSI first.
4667 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4669 struct ice_switch_info *sw = hw->switch_info;
4670 struct ice_port_info *pi = hw->port_info;
4671 enum ice_status status;
4673 if (!ice_is_vsi_valid(hw, vsi_handle))
4674 return ICE_ERR_PARAM;
4676 /* Replay pre-initialization if there is any */
4677 if (ice_is_main_vsi(hw, vsi_handle)) {
        status = ice_replay_pre_init(hw, sw);
        if (status)
            return status;
    }
    /* Replay per VSI all RSS configurations */
    status = ice_replay_rss_cfg(hw, vsi_handle);
    if (status)
        return status;
    /* Replay per VSI all filters */
    status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
    if (!status)
        status = ice_replay_vsi_agg(hw, vsi_handle);
    return status;
}
4694 * ice_replay_post - post replay configuration cleanup
4695 * @hw: pointer to the HW struct
4697 * Post replay cleanup.
4699 void ice_replay_post(struct ice_hw *hw)
4701 /* Delete old entries from replay filter list head */
4702 ice_rm_all_sw_replay_rule_info(hw);
    ice_sched_replay_agg(hw);
}
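/*
 * Replay ordering sketch (caller-side, hypothetical VSI bookkeeping): the
 * main VSI must be replayed before any other VSI so that the one-time
 * pre-init work runs first, and ice_replay_post() runs once everything has
 * been restored.
 *
 *    enum ice_status status;
 *    u16 handle;
 *
 *    status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *    for (handle = 0; !status && handle < num_handles; handle++) {
 *        if (handle == ICE_MAIN_VSI_HANDLE ||
 *            !ice_is_vsi_valid(hw, handle))
 *            continue;
 *        status = ice_replay_vsi(hw, handle);
 *    }
 *    ice_replay_post(hw);
 *
 * num_handles is a stand-in for however the caller tracks its VSIs.
 */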
4707 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4708 * @hw: ptr to the hardware info
4709 * @reg: offset of 64 bit HW register to read from
4710 * @prev_stat_loaded: bool to specify if previous stats are loaded
4711 * @prev_stat: ptr to previous loaded stat value
4712 * @cur_stat: ptr to current stat value
4715 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4716 u64 *prev_stat, u64 *cur_stat)
4718 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4720 /* device stats are not reset at PFR, they likely will not be zeroed
4721 * when the driver starts. Thus, save the value from the first read
4722 * without adding to the statistic value so that we report stats which
4723 * count up from zero.
4725 if (!prev_stat_loaded) {
        *prev_stat = new_data;
        return;
    }
4730 /* Calculate the difference between the new and old values, and then
4731 * add it to the software stat value.
    if (new_data >= *prev_stat)
        *cur_stat += new_data - *prev_stat;
    else
        /* to manage the potential roll-over */
        *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4739 /* Update the previously stored value to prepare for next read */
    *prev_stat = new_data;
}
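/*
 * Roll-over arithmetic example (illustrative numbers): the HW counter is
 * only 40 bits wide, so it wraps at BIT_ULL(40). With *prev_stat =
 * 0xFFFFFFFFF0 and new_data = 0x10 the else branch above computes
 *
 *    (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20
 *
 * i.e. 32 units elapsed across the wrap, which is what gets accumulated into
 * the 64-bit software statistic.
 */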
4744 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4745 * @hw: ptr to the hardware info
4746 * @reg: offset of HW register to read from
4747 * @prev_stat_loaded: bool to specify if previous stats are loaded
4748 * @prev_stat: ptr to previous loaded stat value
4749 * @cur_stat: ptr to current stat value
4752 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4753 u64 *prev_stat, u64 *cur_stat)
4757 new_data = rd32(hw, reg);
4759 /* device stats are not reset at PFR, they likely will not be zeroed
4760 * when the driver starts. Thus, save the value from the first read
4761 * without adding to the statistic value so that we report stats which
4762 * count up from zero.
4764 if (!prev_stat_loaded) {
        *prev_stat = new_data;
        return;
    }
4769 /* Calculate the difference between the new and old values, and then
4770 * add it to the software stat value.
    if (new_data >= *prev_stat)
        *cur_stat += new_data - *prev_stat;
    else
        /* to manage the potential roll-over */
        *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4778 /* Update the previously stored value to prepare for next read */
    *prev_stat = new_data;
}
4783 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
4784 * @hw: ptr to the hardware info
4785 * @vsi_handle: VSI handle
4786 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
4787 * @cur_stats: ptr to current stats structure
4789 * The GLV_REPC statistic register actually tracks two 16bit statistics, and
4790 * thus cannot be read using the normal ice_stat_update32 function.
4792 * Read the GLV_REPC register associated with the given VSI, and update the
4793 * rx_no_desc and rx_error values in the ice_eth_stats structure.
4795 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
4796 * cleared each time it's read.
4798 * Note that the GLV_RDPC register also counts the causes that would trigger
4799 * GLV_REPC. However, it does not give the finer grained detail about why the
4800 * packets are being dropped. The GLV_REPC values can be used to distinguish
4801 * whether Rx packets are dropped due to errors or due to no available
4805 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
4806 struct ice_eth_stats *cur_stats)
    u16 vsi_num, no_desc, error_cnt;
    u32 repc;
    if (!ice_is_vsi_valid(hw, vsi_handle))
        return;
4814 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
4816 /* If we haven't loaded stats yet, just clear the current value */
4817 if (!prev_stat_loaded) {
        wr32(hw, GLV_REPC(vsi_num), 0);
        return;
    }
4822 repc = rd32(hw, GLV_REPC(vsi_num));
4823 no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
4824 error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
4826 /* Clear the count by writing to the stats register */
4827 wr32(hw, GLV_REPC(vsi_num), 0);
4829 cur_stats->rx_no_desc += no_desc;
    cur_stats->rx_errors += error_cnt;
}
4834 * ice_sched_query_elem - query element information from HW
4835 * @hw: pointer to the HW struct
4836 * @node_teid: node TEID to be queried
4837 * @buf: buffer to element information
4839 * This function queries HW element information
4842 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4843 struct ice_aqc_txsched_elem_data *buf)
4845 u16 buf_size, num_elem_ret = 0;
4846 enum ice_status status;
4848 buf_size = sizeof(*buf);
4849 ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
4850 buf->node_teid = CPU_TO_LE32(node_teid);
    status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
                                      NULL);
    if (status != ICE_SUCCESS || num_elem_ret != 1)
        ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
    return status;
}
4859 * ice_get_fw_mode - returns FW mode
4860 * @hw: pointer to the HW struct
4862 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
4864 #define ICE_FW_MODE_DBG_M BIT(0)
4865 #define ICE_FW_MODE_REC_M BIT(1)
4866 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
    u32 fw_mode;

    /* check the current FW mode */
4870 fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
4872 if (fw_mode & ICE_FW_MODE_DBG_M)
4873 return ICE_FW_MODE_DBG;
4874 else if (fw_mode & ICE_FW_MODE_REC_M)
4875 return ICE_FW_MODE_REC;
4876 else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
4877 return ICE_FW_MODE_ROLLBACK;
4879 return ICE_FW_MODE_NORMAL;
4884 * @hw: pointer to the hw struct
4885 * @topo_addr: topology address for a device to communicate with
4886 * @bus_addr: 7-bit I2C bus address
4887 * @addr: I2C memory address (I2C offset) with up to 16 bits
4888 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
4889 * bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
4890 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
4891 * @cd: pointer to command details structure or NULL
4896 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
4897 u16 bus_addr, __le16 addr, u8 params, u8 *data,
4898 struct ice_sq_cd *cd)
4900 struct ice_aq_desc desc = { 0 };
4901 struct ice_aqc_i2c *cmd;
    enum ice_status status;
    u8 data_size;
4905 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
    cmd = &desc.params.read_write_i2c;

    if (!data)
        return ICE_ERR_PARAM;
4911 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
4913 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
4914 cmd->topo_addr = topo_addr;
4915 cmd->i2c_params = params;
4916 cmd->i2c_addr = addr;
    status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
    if (!status) {
        struct ice_aqc_read_i2c_resp *resp;
        u8 i;

        resp = &desc.params.read_i2c_resp;
        for (i = 0; i < data_size; i++) {
            *data = resp->i2c_data[i];
            data++;
        }
    }

    return status;
}
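/*
 * Building the params byte (sketch based only on the bit layout documented
 * above; the data offset size encoding in bits [6:5] and the address type in
 * bit [4] are left at 0 here, which is assumed to mean "no offset" and 7-bit
 * addressing):
 *
 *    u8 params = 2;        // bits [3:0]: read 2 bytes
 *
 *    params |= BIT(7);     // bit [7]: use a repeated start
 *    status = ice_aq_read_i2c(hw, topo_addr, bus_addr, addr, params, buf,
 *                             NULL);
 */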
4935 * @hw: pointer to the hw struct
4936 * @topo_addr: topology address for a device to communicate with
4937 * @bus_addr: 7-bit I2C bus address
4938 * @addr: I2C memory address (I2C offset) with up to 16 bits
4939 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
4940 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
4941 * @cd: pointer to command details structure or NULL
4943 * Write I2C (0x06E3)
4946 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
4947 u16 bus_addr, __le16 addr, u8 params, u8 *data,
4948 struct ice_sq_cd *cd)
4950 struct ice_aq_desc desc = { 0 };
    struct ice_aqc_i2c *cmd;
    u8 data_size;
    u8 i;
4954 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
4955 cmd = &desc.params.read_write_i2c;
4957 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
    /* data_size limited to 4 */
    if (data_size > 4)
        return ICE_ERR_PARAM;
4963 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
4964 cmd->topo_addr = topo_addr;
4965 cmd->i2c_params = params;
4966 cmd->i2c_addr = addr;
4968 for (i = 0; i < data_size; i++) {
        cmd->i2c_data[i] = *data;
        data++;
    }
4973 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4978 * @hw: pointer to the hw struct
4979 * @gpio_ctrl_handle: GPIO controller node handle
4980 * @pin_idx: IO Number of the GPIO that needs to be set
4981 * @value: SW provide IO value to set in the LSB
4982 * @cd: pointer to command details structure or NULL
4984 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
4987 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
4988 struct ice_sq_cd *cd)
4990 struct ice_aqc_gpio *cmd;
4991 struct ice_aq_desc desc;
4993 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
4994 cmd = &desc.params.read_write_gpio;
4995 cmd->gpio_ctrl_handle = gpio_ctrl_handle;
4996 cmd->gpio_num = pin_idx;
4997 cmd->gpio_val = value ? 1 : 0;
4999 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5004 * @hw: pointer to the hw struct
5005 * @gpio_ctrl_handle: GPIO controller node handle
5006 * @pin_idx: IO Number of the GPIO that needs to be set
5007 * @value: IO value read
5008 * @cd: pointer to command details structure or NULL
5010 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
5014 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
5015 bool *value, struct ice_sq_cd *cd)
5017 struct ice_aqc_gpio *cmd;
5018 struct ice_aq_desc desc;
5019 enum ice_status status;
    ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
    cmd = &desc.params.read_write_gpio;
    cmd->gpio_ctrl_handle = gpio_ctrl_handle;
    cmd->gpio_num = pin_idx;

    status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
    if (status)
        return status;

    *value = !!cmd->gpio_val;
    return ICE_SUCCESS;
}
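/*
 * GPIO round-trip sketch (hypothetical controller handle and pin number):
 * drive a pin high through firmware and read the level back.
 *
 *    bool level;
 *    enum ice_status status;
 *
 *    status = ice_aq_set_gpio(hw, gpio_ctrl_handle, 2, true, NULL);
 *    if (!status)
 *        status = ice_aq_get_gpio(hw, gpio_ctrl_handle, 2, &level, NULL);
 */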
5035 * ice_fw_supports_link_override
5036 * @hw: pointer to the hardware structure
5038 * Checks if the firmware supports link override
5040 bool ice_fw_supports_link_override(struct ice_hw *hw)
{
    if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
        if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
            return true;
        if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
            hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
            return true;
    } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
        return true;
    }

    return false;
}
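/*
 * Example reading of the check above (illustrative version numbers): if the
 * link-override baseline were 1.5.2, a firmware AQ API version of 1.5.1
 * would return false, while 1.5.2, 1.6.0 and any 2.x.y would return true;
 * the three-field comparison is simply "API version >= MAJ.MIN.PATCH".
 */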
5056 * ice_get_link_default_override
5057 * @ldo: pointer to the link default override struct
5058 * @pi: pointer to the port info struct
5060 * Gets the link default override for a port
5063 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
5064 struct ice_port_info *pi)
5066 u16 i, tlv, tlv_len, tlv_start, buf, offset;
5067 struct ice_hw *hw = pi->hw;
5068 enum ice_status status;
    status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
                                    ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
    if (status) {
        ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
        return status;
    }
5077 /* Each port has its own config; calculate for our port */
5078 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
5079 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
5081 /* link options first */
    status = ice_read_sr_word(hw, tlv_start, &buf);
    if (status) {
        ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
        return status;
    }
5087 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
5088 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
5089 ICE_LINK_OVERRIDE_PHY_CFG_S;
5091 /* link PHY config */
5092 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
    status = ice_read_sr_word(hw, offset, &buf);
    if (status) {
        ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
        return status;
    }
5098 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
5101 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
5102 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5103 status = ice_read_sr_word(hw, (offset + i), &buf);
5105 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5108 /* shift 16 bits at a time to fill 64 bits */
5109 ldo->phy_type_low |= ((u64)buf << (i * 16));
5112 /* PHY types high */
5113 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
5114 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
5115 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5116 status = ice_read_sr_word(hw, (offset + i), &buf);
5118 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5121 /* shift 16 bits at a time to fill 64 bits */
        ldo->phy_type_high |= ((u64)buf << (i * 16));
    }

    return status;
}
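/*
 * Shadow RAM packing example (illustrative word values): each 64-bit PHY
 * type mask is stored as four consecutive 16-bit words, least significant
 * word first. If the words read back as 0x1111, 0x2222, 0x3333 and 0x4444,
 * the loop above reassembles
 *
 *    0x1111 | (0x2222 << 16) | (0x3333 << 32) | (0x4444 << 48)
 *        = 0x4444333322221111
 */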
5129 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
5130 * @caps: get PHY capability data
5132 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
5134 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
5135 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
5136 ICE_AQC_PHY_AN_EN_CLAUSE73 |
                                   ICE_AQC_PHY_AN_EN_CLAUSE37))
        return true;

    return false;
}
5144 * ice_aq_set_lldp_mib - Set the LLDP MIB
5145 * @hw: pointer to the HW struct
5146 * @mib_type: Local, Remote or both Local and Remote MIBs
5147 * @buf: pointer to the caller-supplied buffer to store the MIB block
5148 * @buf_size: size of the buffer (in bytes)
5149 * @cd: pointer to command details structure or NULL
5151 * Set the LLDP MIB. (0x0A08)
5154 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
5155 struct ice_sq_cd *cd)
5157 struct ice_aqc_lldp_set_local_mib *cmd;
5158 struct ice_aq_desc desc;
5160 cmd = &desc.params.lldp_set_mib;
5162 if (buf_size == 0 || !buf)
5163 return ICE_ERR_PARAM;
5165 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
5167 desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
5168 desc.datalen = CPU_TO_LE16(buf_size);
5170 cmd->type = mib_type;
5171 cmd->length = CPU_TO_LE16(buf_size);
5173 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5177 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
5178 * @hw: pointer to HW struct
5180 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
    if (hw->mac_type != ICE_MAC_E810)
        return false;

    if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
        if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
            return true;
        if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
            hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
            return true;
    } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
        return true;
    }

    return false;
}
5198 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
5199 * @hw: pointer to HW struct
5200 * @vsi_num: absolute HW index for VSI
5201 * @add: boolean for if adding or removing a filter
5204 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
5206 struct ice_aqc_lldp_filter_ctrl *cmd;
5207 struct ice_aq_desc desc;
5209 cmd = &desc.params.lldp_filter_ctrl;
5211 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
    if (add)
        cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
    else
        cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
5218 cmd->vsi_num = CPU_TO_LE16(vsi_num);
5220 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
5224 * ice_fw_supports_report_dflt_cfg
5225 * @hw: pointer to the hardware structure
5227 * Checks if the firmware supports report default configuration
5229 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
    if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
        if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
            return true;
        if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
            hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
            return true;
5237 } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {