/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	300
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_discover_dev_caps is expected to be called before this function is
 * called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_DMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_DMA_TO_NONDMA);
			break;
		}

	return ICE_SUCCESS;
}
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
			   sizeof(pi->phy.link_info.module_type),
			   ICE_NONDMA_TO_NONDMA);
	}

	return status;
}
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and fc threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and fc
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}
/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
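
/*
 * Illustrative usage sketch (an assumption, not code from this file):
 * a caller programs the maximum frame size during port setup, where
 * max_frame is a hypothetical value derived from the configured MTU
 * plus L2 overhead:
 *
 *	status = ice_aq_set_mac_cfg(hw, max_frame, NULL);
 *	if (status)
 *		ice_debug(hw, ICE_DBG_INIT, "Set MAC config failed\n");
 */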
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = (struct ice_switch_info *)
		ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
	if (status) {
		ice_free(hw, hw->switch_info);
		return status;
	}
	return ICE_SUCCESS;
}
/**
 * ice_cleanup_fltr_mgmt_single - clears single filter mngt struct
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	if (!sw)
		return;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_sw_replay_rule_info(hw, sw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}
/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	orom = &hw->flash.orom;
	nvm = &hw->flash.nvm;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major, nvm->minor, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
			ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;
	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);

	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
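
/*
 * Illustrative usage sketch (an assumption, not code from this file):
 * a host driver pairs ice_init_hw() with ice_deinit_hw() over the
 * device's lifetime:
 *
 *	status = ice_init_hw(hw);
 *	if (status)
 *		return status;
 *	... configure switch, VSIs, queues ...
 *	ice_deinit_hw(hw);
 *
 * On failure ice_init_hw() unrolls its own partial setup, so the
 * caller must not invoke ice_deinit_hw() in the error path (see the
 * note on ice_deinit_hw() below).
 */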
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}
/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
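
/*
 * Each ICE_CTX_STORE() entry above records a field's bit width and the
 * LSB position it occupies in the dense hardware layout; ice_set_ctx()
 * walks this table to pack the sparse struct ice_rlan_ctx into the flat
 * buffer that ice_copy_rxq_ctx_to_hw() copies out. For example, qlen
 * (width 13, LSB 89) lands in bits 89..101 of the dense buffer, i.e.
 * starting at bit 1 of byte 11, since 89 = 11 * 8 + 1.
 */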
/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
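
/*
 * Illustrative usage sketch (an assumption, not code from this file):
 * callers fill only the fields they need and let ice_set_ctx() pack
 * them. ring_dma, ring_len and buf_len below are hypothetical values;
 * base and dbuf are expressed in 128-byte units:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;
 *	rlan_ctx.qlen = ring_len;
 *	rlan_ctx.dbuf = buf_len >> 7;
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */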
/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};
/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}
/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};
/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}
/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}
/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}
/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};
/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
		    ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}
/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}
/* FW Admin Queue command wrappers */
/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the AQ opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}
/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static enum ice_status
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	enum ice_status status;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;

	opcode = LE16_TO_CPU(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);

	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = (u8 *)ice_malloc(hw, buf_size);
			if (!buf_cpy)
				return ICE_ERR_NO_MEMORY;
		}

		ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
			   ICE_NONDMA_TO_NONDMA);
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || status == ICE_SUCCESS ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		if (buf_cpy)
			ice_memcpy(buf, buf_cpy, buf_size,
				   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
			   ICE_NONDMA_TO_NONDMA);

		ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	if (buf_cpy)
		ice_free(hw, buf_cpy);

	return status;
}
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	if (hw->aq_send_cmd_fn) {
		enum ice_status status = ICE_ERR_NOT_READY;
		u16 retval = ICE_AQ_RC_OK;

		ice_acquire_lock(&hw->adminq.sq_lock);
		if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc,
					buf, buf_size)) {
			retval = LE16_TO_CPU(desc->retval);
			/* strip off FW internal code */
			if (retval)
				retval &= 0xff;
			if (retval == ICE_AQ_RC_OK)
				status = ICE_SUCCESS;
			else
				status = ICE_ERR_AQ_ERROR;
		}

		hw->adminq.sq_last_status = (enum ice_aq_err)retval;
		ice_release_lock(&hw->adminq.sq_lock);

		return status;
	}
	return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
}
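
/*
 * Most AQ wrappers in this file follow the pattern seen in
 * ice_clear_pf_cfg() above: fill a default direct descriptor with the
 * opcode, set any command-specific parameters, then hand the
 * descriptor (plus an optional indirect buffer) to ice_aq_send_cmd().
 * A minimal sketch:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */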
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = LE32_TO_CPU(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}
/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS - acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *    successfully downloaded the package; the driver does
 *    not have to download the package and can continue
 *    loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = CPU_TO_LE16(res);
	cmd_resp->access_type = CPU_TO_LE16(access);
	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
	cmd_resp->timeout = CPU_TO_LE32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_SUCCESS;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = LE32_TO_CPU(cmd_resp->timeout);

	return status;
}
/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = CPU_TO_LE16(res);
	cmd->res_number = CPU_TO_LE32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		ice_msec_delay(delay, true);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		ice_msec_delay(1, true);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
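
/*
 * Illustrative usage sketch (an assumption, not code from this file):
 * callers bracket an exclusive operation with acquire/release, e.g.
 * taking the Global Config Lock around a package download:
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
 *				 ICE_RES_WRITE, ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 *	if (!status) {
 *		... download package ...
 *		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 *	} else if (status == ICE_ERR_AQ_NO_WORK) {
 *		(another driver already downloaded the package)
 *	}
 */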
/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_entries = CPU_TO_LE16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = ice_struct_size(buf, elem, num);
	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = CPU_TO_LE16(num);
	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
		   ICE_NONDMA_TO_NONDMA);

ice_alloc_res_exit:
	ice_free(hw, buf);
	return status;
}
/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = ice_struct_size(buf, elem, num);
	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = CPU_TO_LE16(num);
	buf->res_type = CPU_TO_LE16(type);
	ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
		   ICE_NONDMA_TO_NONDMA);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	ice_free(hw, buf);
	return status;
}
/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
			     ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
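
/*
 * Worked example: on a two-port device whose valid-functions bitmap is
 * 0x3, ice_hweight8() yields funcs = 2, so a device-wide maximum of
 * 2048 entries is reported as ice_get_num_per_func(hw, 2048) = 1024
 * entries per PF.
 */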
/**
 * ice_parse_common_caps - parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Returns: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
{
	u32 logical_id = LE32_TO_CPU(elem->logical_id);
	u32 phys_id = LE32_TO_CPU(elem->phys_id);
	u32 number = LE32_TO_CPU(elem->number);
	u16 cap = LE16_TO_CPU(elem->cap);
	bool found = true;

	switch (cap) {
	case ICE_AQC_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
			  caps->valid_functions);
		break;
	case ICE_AQC_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
			  caps->active_tc_bitmap);
		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
		break;
	case ICE_AQC_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
			  caps->rss_table_size);
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
			  caps->rss_table_entry_width);
		break;
	case ICE_AQC_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
			  caps->num_rxq);
		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
			  caps->rxq_first_id);
		break;
	case ICE_AQC_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
			  caps->num_txq);
		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
			  caps->txq_first_id);
		break;
	case ICE_AQC_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
			  caps->num_msix_vectors);
		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
			  caps->msix_vector_first_id);
		break;
	case ICE_AQC_CAPS_NVM_MGMT:
		caps->sec_rev_disabled =
			(number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
			true : false;
		ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
			  caps->sec_rev_disabled);
		caps->update_disabled =
			(number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
			true : false;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
			  caps->update_disabled);
		caps->nvm_unified_update =
			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
			  caps->nvm_unified_update);
		break;
	case ICE_AQC_CAPS_MAX_MTU:
		caps->max_mtu = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
			  prefix, caps->max_mtu);
		break;
	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
	case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
	{
		u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;

		caps->ext_topo_dev_img_ver_high[index] = number;
		caps->ext_topo_dev_img_ver_low[index] = logical_id;
		caps->ext_topo_dev_img_part_num[index] =
			(phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
			ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
		caps->ext_topo_dev_img_load_en[index] =
			(phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
		caps->ext_topo_dev_img_prog_en[index] =
			(phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
			  prefix, index,
			  caps->ext_topo_dev_img_ver_high[index]);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
			  prefix, index,
			  caps->ext_topo_dev_img_ver_low[index]);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: ext_topo_dev_img_part_num[%d] = %d\n",
			  prefix, index,
			  caps->ext_topo_dev_img_part_num[index]);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: ext_topo_dev_img_load_en[%d] = %d\n",
			  prefix, index,
			  caps->ext_topo_dev_img_load_en[index]);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
			  prefix, index,
			  caps->ext_topo_dev_img_prog_en[index]);
		break;
	}
	default:
		/* Not one of the recognized common capabilities */
		found = false;
	}

	return found;
}
/**
 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
 * @hw: pointer to the HW structure
 * @caps: pointer to capabilities structure to fix
 *
 * Re-calculate the capabilities that are dependent on the number of physical
 * ports; i.e. some features are not supported or function differently on
 * devices with more than 4 ports.
 */
static void
ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
{
	/* This assumes device capabilities are always scanned before function
	 * capabilities during the initialization flow.
	 */
	if (hw->dev_caps.num_funcs > 4) {
		/* Max 4 TCs per port */
		caps->maxtc = 4;
		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
			  caps->maxtc);
	}
}

/**
 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VSI.
 */
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			struct ice_aqc_list_caps_elem *cap)
{
	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
		  LE32_TO_CPU(cap->number));
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
		  func_p->guar_num_vsi);
}

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
	u32 reg_val, val;

	if (hw->dcf_enabled)
		return;
	reg_val = rd32(hw, GLQF_FD_SIZE);
	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
	      GLQF_FD_SIZE_FD_GSIZE_S;
	func_p->fd_fltr_guar =
		ice_get_num_per_func(hw, val);
	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
	      GLQF_FD_SIZE_FD_BSIZE_S;
	func_p->fd_fltr_best_effort = val;

	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
		  func_p->fd_fltr_guar);
	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
		  func_p->fd_fltr_best_effort);
}

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
 */
static void
ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
		    void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);

	for (i = 0; i < cap_count; i++) {
		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &func_p->common_cap,
					      &cap_resp[i], "func caps");

		switch (cap) {
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_func_caps(hw, func_p);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
}

/**
 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
 */
static void
ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			      struct ice_aqc_list_caps_elem *cap)
{
	u32 number = LE32_TO_CPU(cap->number);

	dev_p->num_funcs = ice_hweight32(number);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
		  dev_p->num_funcs);
}

/**
 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VSI for device capabilities.
 */
static void
ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		       struct ice_aqc_list_caps_elem *cap)
{
	u32 number = LE32_TO_CPU(cap->number);

	dev_p->num_vsi_allocd_to_host = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
		  dev_p->num_vsi_allocd_to_host);
}

/**
 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_FD for device capabilities.
 */
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	u32 number = LE32_TO_CPU(cap->number);

	dev_p->num_flow_director_fltr = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
		  dev_p->num_flow_director_fltr);
}

/**
 * ice_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
 */
static void
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		   void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);

	for (i = 0; i < cap_count; i++) {
		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &dev_p->common_cap,
					      &cap_resp[i], "dev caps");

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
}

/**
 * ice_aq_list_caps - query function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: a buffer to hold the capabilities
 * @buf_size: size of the buffer
 * @cap_count: if not NULL, set to the number of capabilities reported
 * @opc: capabilities type to discover, device or function
 * @cd: pointer to command details structure or NULL
 *
 * Get the function (0x000A) or device (0x000B) capabilities description from
 * firmware and store it in the buffer.
 *
 * If the cap_count pointer is not NULL, then it is set to the number of
 * capabilities firmware will report. Note that if the buffer size is too
 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
 * cap_count will still be updated in this case. It is recommended that the
 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
 * firmware could return) to avoid this.
 */
static enum ice_status
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	if (cap_count)
		*cap_count = LE32_TO_CPU(cmd->count);

	return status;
}

/**
 * ice_discover_dev_caps - Read and extract device capabilities
 * @hw: pointer to the hardware structure
 * @dev_caps: pointer to device capabilities structure
 *
 * Read the device capabilities and extract them into the dev_caps structure
 * for later use.
 */
static enum ice_status
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
	enum ice_status status;
	u32 cap_count = 0;
	void *cbuf;

	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
	if (!cbuf)
		return ICE_ERR_NO_MEMORY;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (!status)
		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
	ice_free(hw, cbuf);

	return status;
}

/**
 * ice_discover_func_caps - Read and extract function capabilities
 * @hw: pointer to the hardware structure
 * @func_caps: pointer to function capabilities structure
 *
 * Read the function capabilities and extract them into the func_caps structure
 * for later use.
 */
static enum ice_status
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
	enum ice_status status;
	u32 cap_count = 0;
	void *cbuf;

	cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
	if (!cbuf)
		return ICE_ERR_NO_MEMORY;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_func_caps, NULL);
	if (!status)
		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
	ice_free(hw, cbuf);

	return status;
}

/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the hardware structure
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	struct ice_hw_common_caps cached_caps;
	u32 num_funcs;

	/* cache some func_caps values that should be restored after memset */
	cached_caps = func_caps->common_cap;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

#define ICE_RESTORE_FUNC_CAP(name) \
	func_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_FUNC_CAP(valid_functions);
	ICE_RESTORE_FUNC_CAP(txq_first_id);
	ICE_RESTORE_FUNC_CAP(rxq_first_id);
	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
	ICE_RESTORE_FUNC_CAP(max_mtu);
	ICE_RESTORE_FUNC_CAP(nvm_unified_update);

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	cached_caps = dev_caps->common_cap;
	num_funcs = dev_caps->num_funcs;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

#define ICE_RESTORE_DEV_CAP(name) \
	dev_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_DEV_CAP(valid_functions);
	ICE_RESTORE_DEV_CAP(txq_first_id);
	ICE_RESTORE_DEV_CAP(rxq_first_id);
	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
	ICE_RESTORE_DEV_CAP(max_mtu);
	ICE_RESTORE_DEV_CAP(nvm_unified_update);
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_dev_caps(hw, &hw->dev_caps);
	if (status)
		return status;

	return ice_discover_func_caps(hw, &hw->func_caps);
}

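/*
 * Usage sketch (illustrative): during HW initialization a driver would
 * typically discover capabilities once and then consult the cached copies.
 * Treating a zero return as success follows the enum ice_status convention
 * used throughout this file (ICE_SUCCESS == 0):
 *
 *	if (!ice_get_caps(hw))
 *		ice_debug(hw, ICE_DBG_INIT, "PF owns %d MSI-X vectors\n",
 *			  hw->func_caps.common_cap.num_msix_vectors);
 */
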
/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;
	ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the HW struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the HW struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_aq_set_port_params - set physical port parameters.
 * @pi: pointer to the port info struct
 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
 * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
 * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
 * @double_vlan: if set, double VLAN is enabled
 * @cd: pointer to command details structure or NULL
 *
 * Set Physical port parameters (0x0203)
 */
enum ice_status
ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
		       bool save_bad_pac, bool pad_short_pac, bool double_vlan,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_params *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;
	u16 cmd_flags = 0;

	cmd = &desc.params.set_port_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
	cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
	if (save_bad_pac)
		cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
	if (pad_short_pac)
		cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
	if (double_vlan)
		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * This helper function will convert an entry in PHY type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: In the structure of [phy_type_low, phy_type_high], there should
 * be one bit set, as this function will convert one PHY type to its
 * speed.
 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

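/*
 * Worked example (illustrative): a phy_type_low value with only the
 * ICE_PHY_TYPE_LOW_25GBASE_CR bit set maps to ICE_AQ_LINK_SPEED_25GB.
 * Passing a value with two bits set breaks the one-bit contract noted
 * above, matches no case label, and yields ICE_AQ_LINK_SPEED_UNKNOWN:
 *
 *	u16 speed;
 *
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_CR,
 *						     0);
 *	// speed == ICE_AQ_LINK_SPEED_25GB
 */
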
/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap structure, you can check it at
 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in this [phy_type_low, phy_type_high] structure will
 * present a certain link speed. This helper function will turn on bits
 * in [phy_type_low, phy_type_high] structure based on the value of
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

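/*
 * Usage sketch (illustrative): to advertise both 10G and 25G, OR the
 * corresponding AQ speed bits into a bitmap and let the helper set every
 * matching PHY type bit:
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	// phy_low now has every 10G and 25G PHY type bit set
 */
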
/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);

	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = ICE_SUCCESS;

	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	enum ice_status status;

	if (!pi)
		return ICE_ERR_PARAM;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = (struct ice_aqc_get_phy_caps_data *)
			ice_malloc(hw, sizeof(*pcaps));
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);

		if (status == ICE_SUCCESS)
			ice_memcpy(li->module_type, &pcaps->module_type,
				   sizeof(li->module_type),
				   ICE_NONDMA_TO_NONDMA);

		ice_free(hw, pcaps);
	}

	return status;
}

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
 */
static void
ice_cache_phy_user_req(struct ice_port_info *pi,
		       struct ice_phy_cache_mode_data cache_data,
		       enum ice_phy_cache_mode cache_mode)
{
	if (!pi)
		return;

	switch (cache_mode) {
	case ICE_FC_MODE:
		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
		break;
	case ICE_SPEED_MODE:
		pi->phy.curr_user_speed_req =
			cache_data.data.curr_user_speed_req;
		break;
	case ICE_FEC_MODE:
		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
		break;
	default:
		break;
	}
}

/**
 * ice_caps_to_fc_mode
 * @caps: PHY capabilities
 *
 * Convert PHY FC capabilities to ice FC mode
 */
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
{
	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_FULL;

	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		return ICE_FC_TX_PAUSE;

	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_RX_PAUSE;

	return ICE_FC_NONE;
}

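/*
 * Worked example (illustrative): the mapping above is a simple truth table
 * over the two pause ability bits, so both bits set yields full flow
 * control:
 *
 *	enum ice_fc_mode mode;
 *
 *	mode = ice_caps_to_fc_mode(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
 *				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
 *	// mode == ICE_FC_FULL; TX-only and RX-only map to the
 *	// corresponding *_PAUSE modes, neither bit maps to ICE_FC_NONE
 */
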
/**
 * ice_caps_to_fec_mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 *
 * Convert PHY FEC capabilities to ice FEC mode
 */
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
		return ICE_FEC_AUTO;

	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
			   ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ICE_FEC_BASER;

	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
		return ICE_FEC_RS;

	return ICE_FEC_NONE;
}

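/*
 * Usage sketch (illustrative): given a get-PHY-caps response, derive the
 * effective FEC mode; the auto-FEC capability bit takes precedence over
 * any explicit BASE-R or RS option bits:
 *
 *	enum ice_fec_mode fec;
 *
 *	fec = ice_caps_to_fec_mode(pcaps->caps, pcaps->link_fec_options);
 *	// ICE_FEC_AUTO whenever ICE_AQC_PHY_EN_AUTO_FEC is set in caps
 */
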
/**
 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 */
static enum ice_status
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
	       enum ice_fc_mode req_mode)
{
	struct ice_phy_cache_mode_data cache_data;
	u8 pause_mask = 0x0;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	switch (req_mode) {
	case ICE_FC_AUTO:
	{
		struct ice_aqc_get_phy_caps_data *pcaps;
		enum ice_status status;

		pcaps = (struct ice_aqc_get_phy_caps_data *)
			ice_malloc(pi->hw, sizeof(*pcaps));
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;

		/* Query the value of FC that both the NIC and attached media
		 * can support
		 */
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);
		if (status) {
			ice_free(pi->hw, pcaps);
			return status;
		}

		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;

		ice_free(pi->hw, pcaps);
		break;
	}
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	/* clear the old pause settings */
	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
		       ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg->caps |= pause_mask;

	/* Cache user FC request */
	cache_data.data.curr_user_fc_req = req_mode;
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	return ICE_SUCCESS;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || !aq_failures)
		return ICE_ERR_BAD_PTR;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status) {
		if (status != ICE_ERR_BAD_PTR)
			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;

		goto out;
	}

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (status == ICE_SUCCESS)
				break;

			ice_msec_delay(100, true);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	ice_free(hw, pcaps);
	return status;
}

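/*
 * Usage sketch (illustrative): request RX-only pause and let firmware
 * restart the link so the change takes effect. aq_failures tells the
 * caller which stage failed, if any:
 *
 *	u8 aq_failures = 0;
 *	enum ice_status status;
 *
 *	pi->fc.req_mode = ICE_FC_RX_PAUSE;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "set_fc failed, stage %d\n",
 *			  aq_failures);
 */
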
/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match the PHY
 * configuration.
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure.
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
enum ice_status
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	hw = pi->hw;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = ICE_ERR_PARAM;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
	    !ice_fw_supports_report_dflt_cfg(pi->hw)) {
		struct ice_link_default_override_tlv tlv;

		if (ice_get_link_default_override(&tlv, pi))
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	ice_free(hw, pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non zero. As a
 * result of this call, link status reporting becomes enabled
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = ICE_SUCCESS;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = CPU_TO_LE16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
enum ice_status
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: 0 read, 1 for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
enum ice_status
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!data || (mem_addr & 0xff00))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
					((set_page <<
					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
	cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
	cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
	if (write)
		cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}

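/*
 * Usage sketch (illustrative): read the first byte (the SFF-8024
 * identifier) of a module EEPROM on logical port 0 at the conventional
 * 0xA0 bus address. The argument values are assumptions for the example
 * only:
 *
 *	u8 id;
 *	enum ice_status status;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x000, 0, 0, &id, 1,
 *				   false, NULL);
 *	// on success, id == 0x03 would indicate an SFP/SFP+/SFP28 module
 */
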
/**
 * ice_aq_prog_topo_dev_nvm
 * @hw: pointer to the hardware structure
 * @topo_params: pointer to structure storing topology parameters for a device
 * @cd: pointer to command details structure or NULL
 *
 * Program Topology Device NVM (0x06F2)
 */
enum ice_status
ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
			 struct ice_aqc_link_topo_params *topo_params,
			 struct ice_sq_cd *cd)
{
	struct ice_aqc_prog_topo_dev_nvm *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.prog_topo_dev_nvm;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);

	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
		   ICE_NONDMA_TO_NONDMA);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_read_topo_dev_nvm
 * @hw: pointer to the hardware structure
 * @topo_params: pointer to structure storing topology parameters for a device
 * @start_address: byte offset in the topology device NVM
 * @data: pointer to data buffer
 * @data_size: number of bytes to be read from the topology device NVM
 * @cd: pointer to command details structure or NULL
 *
 * Read Topology Device NVM (0x06F3)
 */
enum ice_status
ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
			 struct ice_aqc_link_topo_params *topo_params,
			 u32 start_address, u8 *data, u8 data_size,
			 struct ice_sq_cd *cd)
{
	struct ice_aqc_read_topo_dev_nvm *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!data || data_size == 0 ||
	    data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
		return ICE_ERR_PARAM;

	cmd = &desc.params.read_topo_dev_nvm;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);

	desc.datalen = data_size;
	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
		   ICE_NONDMA_TO_NONDMA);
	cmd->start_address = CPU_TO_LE32(start_address);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);

	return ICE_SUCCESS;
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u8 *lut;

	if (!params)
		return ICE_ERR_PARAM;

	vsi_handle = params->vsi_handle;
	lut = params->lut;

	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	lut_size = params->lut_size;
	lut_type = params->lut_type;
	glob_lut_idx = params->global_lut_id;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall-through */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = CPU_TO_LE16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}

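/*
 * Usage sketch (illustrative): program a 512-entry PF LUT for a VSI. The
 * params structure bundles everything __ice_aq_get_set_rss_lut() needs;
 * lut_buf is assumed to be a caller-filled array of 512 queue indices:
 *
 *	struct ice_aq_get_set_rss_lut_params params = { 0 };
 *	enum ice_status status;
 *
 *	params.vsi_handle = vsi_handle;
 *	params.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512;
 *	params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
 *	params.lut = lut_buf;
 *	status = ice_aq_set_rss_lut(hw, &params);
 */
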
/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum ice_status
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key,
			 bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

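/*
 * Usage sketch (illustrative): set an RSS key for a VSI. The constant key
 * byte below is a placeholder; a real caller would normally fill the
 * standard key area with random bytes:
 *
 *	struct ice_aqc_get_set_rss_keys keys = { 0 };
 *	enum ice_status status;
 *
 *	ice_memset(keys.standard_rss_key, 0x6d,
 *		   sizeof(keys.standard_rss_key), ICE_NONDMA_MEM);
 *	status = ice_aq_set_rss_key(hw, vsi_handle, &keys);
 */
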
/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * AQ command.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += ice_struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return ICE_ERR_PARAM;

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

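/*
 * Buffer sizing sketch (illustrative): for a single queue group with one
 * queue, the buf_size passed to ice_aq_add_lan_txq() must equal the summed
 * ice_struct_size() of every group, mirroring the validation loop above:
 *
 *	struct ice_aqc_add_tx_qgrp *qg;
 *	enum ice_status status;
 *	u16 buf_size;
 *
 *	buf_size = ice_struct_size(qg, txqs, 1);
 *	qg = (struct ice_aqc_add_tx_qgrp *)ice_malloc(hw, buf_size);
 *	if (qg) {
 *		qg->num_txqs = 1;
 *		// fill qg->parent_teid and qg->txqs[0], then:
 *		status = ice_aq_add_lan_txq(hw, 1, qg, buf_size, NULL);
 *	}
 */
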
/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = ice_struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  LE16_TO_CPU(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

/**
 * ice_aq_move_recfg_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qs: number of queues to move/reconfigure
 * @is_move: true if this operation involves node movement
 * @is_tc_change: true if this operation involves a TC change
 * @subseq_call: true if this operation is a subsequent call
 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
 * @timeout: timeout in units of 100 usec (valid values 0-50)
 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
 * @buf: struct containing src/dest TEID and per-queue info
 * @buf_size: size of buffer for indirect command
 * @txqs_moved: out param, number of queues successfully moved
 * @cd: pointer to command details structure or NULL
 *
 * Move / Reconfigure Tx LAN queues (0x0C32)
 */
enum ice_status
ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
			  bool is_tc_change, bool subseq_call, bool flush_pipe,
			  u8 timeout, u32 *blocked_cgds,
			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
			  u8 *txqs_moved, struct ice_sq_cd *cd)
{
	struct ice_aqc_move_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.move_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);

#define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
	if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
		return ICE_ERR_PARAM;

	if (is_tc_change && !flush_pipe && !blocked_cgds)
		return ICE_ERR_PARAM;

	if (!is_move && !is_tc_change)
		return ICE_ERR_PARAM;

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (is_move)
		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;

	if (is_tc_change)
		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;

	if (subseq_call)
		cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;

	if (flush_pipe)
		cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;

	cmd->num_qs = num_qs;
	cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
			ICE_AQC_Q_CMD_TIMEOUT_M);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	if (!status && txqs_moved)
		*txqs_moved = cmd->num_qs;

	if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
	    is_tc_change && !flush_pipe)
		*blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);

	return status;
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);

	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);

	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);

	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return ICE_SUCCESS;
}

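/* Illustrative usage sketch (not part of the driver): packing a non-packed
 * Tx LAN queue context into the byte layout FW expects. The descriptor
 * table name "ice_tlan_ctx_info" mirrors the table the driver exports for
 * this purpose; the buffer size macro is a placeholder assumption for this
 * sketch only.
 *
 *	struct ice_tlan_ctx tlan_ctx = { 0 };
 *	u8 ctx_buf[ICE_TXQ_CTX_SZ] = { 0 };	// hypothetical size macro
 *	enum ice_status status;
 *
 *	status = ice_set_ctx(hw, (u8 *)&tlan_ctx, ctx_buf, ice_tlan_ctx_info);
 *
 * Each ce_info[] entry carries a field's offset within the unpacked struct
 * plus its lsb/width in the packed layout, so the loop above is entirely
 * table-driven.
 */
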
/**
 * ice_read_byte - read context byte into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u8 dest_byte, mask;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);

	/* keep only the bits of this field, then shift it down to bit 0 */
	dest_byte &= mask;

	dest_byte >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
}

/**
 * ice_read_word - read context word into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u16 dest_word, mask;
	u8 *src, *target;
	__le16 src_word;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly, keeping only the bits of this field
	 */
	src_word &= CPU_TO_LE16(mask);

	/* get the data back into host order before shifting */
	dest_word = LE16_TO_CPU(src_word);

	dest_word >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
}

/**
 * ice_read_dword - read context dword into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u32 dest_dword, mask;
	__le32 src_dword;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly, keeping only the bits of this field
	 */
	src_dword &= CPU_TO_LE32(mask);

	/* get the data back into host order before shifting */
	dest_dword = LE32_TO_CPU(src_dword);

	dest_dword >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_read_qword - read context qword into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u64 dest_qword, mask;
	__le64 src_qword;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly, keeping only the bits of this field
	 */
	src_qword &= CPU_TO_LE64(mask);

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_get_ctx - extract context bits from a packed structure
 * @src_ctx: pointer to a generic packed context structure
 * @dest_ctx: pointer to a generic non-packed context structure
 * @ce_info: a description of the structure to be read from
 */
enum ice_status
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		switch (ce_info[f].size_of) {
		case 1:
			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 2:
			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 4:
			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 8:
			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return ICE_SUCCESS;
}

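/* Illustrative note (not part of the driver): ice_get_ctx() is the inverse
 * of ice_set_ctx(), so a context packed with a given ce_info table can be
 * unpacked again with the same table. A minimal round-trip sketch, reusing
 * the hypothetical buffer from the ice_set_ctx() example above:
 *
 *	struct ice_tlan_ctx out = { 0 };
 *
 *	status = ice_get_ctx(ctx_buf, (u8 *)&out, ice_tlan_ctx_info);
 *	// on success, "out" holds the same field values that were packed
 */
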
/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 * Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  LE16_TO_CPU(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	ice_release_lock(&pi->sched_lock);
	return status;
}

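/* Illustrative usage sketch (not part of the driver): enabling the first
 * LAN queue of a VSI on TC 0. The caller owns "qg_buf", a one-element
 * ice_aqc_add_tx_qgrp with the queue's context already packed; the names
 * here are assumptions for the example only.
 *
 *	struct ice_aqc_add_tx_qgrp *qg_buf; // num_txqs = 1, ctx filled in
 *	u16 buf_size = ice_struct_size(qg_buf, txqs, 1);
 *	enum ice_status status;
 *
 *	status = ice_ena_vsi_txq(hw->port_info, vsi_handle, 0, 0, 1,
 *				 qg_buf, buf_size, NULL);
 *
 * On success FW returns the new leaf TEID in qg_buf->txqs[0].q_teid, and
 * the queue handle/TEID are cached in the software queue context.
 */
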
/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if queue is disabled already yet the disable queue command
		 * has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	buf_size = ice_struct_size(qg_list, q_id, 1);
	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status != ICE_SUCCESS)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	ice_release_lock(&pi->sched_lock);
	ice_free(hw, qg_list);
	return status;
}

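/* Illustrative usage sketch (not part of the driver): disabling the queue
 * enabled in the ice_ena_vsi_txq() example. The handle/ID/TEID values are
 * single-element arrays owned by the caller; ICE_NO_RESET indicates a
 * normal disable rather than part of a VM/VF reset flow.
 *
 *	u16 q_handle = 0, q_id = txq_id; // from the add-queue response
 *	u32 q_teid = tx_teid;            // leaf TEID cached at enable time
 *	enum ice_status status;
 *
 *	status = ice_dis_vsi_txq(hw->port_info, vsi_handle, 0, 1, &q_handle,
 *				 &q_id, &q_teid, ICE_NO_RESET, 0, NULL);
 */
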
/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = ICE_SUCCESS;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_is_main_vsi - checks whether the VSI is main VSI
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * Checks whether the VSI is the main VSI (the first PF VSI created on
 * this PF).
 */
static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function initializes filters
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
{
	enum ice_status status;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_sw_replay_rule_info(hw, sw);
	/* At the start of replay, move entries onto the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	status = ice_sched_replay_root_node_bw(hw->port_info);
	if (status)
		return status;

	return ice_sched_replay_tc_node_bw(hw->port_info);
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_port_info *pi = hw->port_info;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (ice_is_main_vsi(hw, vsi_handle)) {
		status = ice_replay_pre_init(hw, sw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

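/* Worked example (illustrative): roll-over handling for the 40-bit case.
 * If the previous read was 0xFFFFFFFFF0 and the counter wrapped to 0x10,
 * the plain difference would be negative, so the else branch computes
 *
 *	(0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20
 *
 * i.e. 32 packets were counted across the wrap, which is what gets added
 * to the 64-bit software statistic.
 */
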
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
 * @hw: ptr to the hardware info
 * @vsi_handle: VSI handle
 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
 * @cur_stats: ptr to current stats structure
 *
 * The GLV_REPC statistic register actually tracks two 16bit statistics, and
 * thus cannot be read using the normal ice_stat_update32 function.
 *
 * Read the GLV_REPC register associated with the given VSI, and update the
 * rx_no_desc and rx_error values in the ice_eth_stats structure.
 *
 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
 * cleared each time it's read.
 *
 * Note that the GLV_RDPC register also counts the causes that would trigger
 * GLV_REPC. However, it does not give the finer grained detail about why the
 * packets are being dropped. The GLV_REPC values can be used to distinguish
 * whether Rx packets are dropped due to errors or due to no available
 * descriptors.
 */
void
ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
		     struct ice_eth_stats *cur_stats)
{
	u16 vsi_num, no_desc, error_cnt;
	u32 repc;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return;

	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	/* If we haven't loaded stats yet, just clear the current value */
	if (!prev_stat_loaded) {
		wr32(hw, GLV_REPC(vsi_num), 0);
		return;
	}

	repc = rd32(hw, GLV_REPC(vsi_num));
	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;

	/* Clear the count by writing to the stats register */
	wr32(hw, GLV_REPC(vsi_num), 0);

	cur_stats->rx_no_desc += no_desc;
	cur_stats->rx_errors += error_cnt;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
	buf->node_teid = CPU_TO_LE32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status != ICE_SUCCESS || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_get_fw_mode - returns FW mode
 * @hw: pointer to the HW struct
 */
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
{
#define ICE_FW_MODE_DBG_M BIT(0)
#define ICE_FW_MODE_REC_M BIT(1)
#define ICE_FW_MODE_ROLLBACK_M BIT(2)
	u32 fw_mode;

	/* check the current FW mode */
	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;

	if (fw_mode & ICE_FW_MODE_DBG_M)
		return ICE_FW_MODE_DBG;
	else if (fw_mode & ICE_FW_MODE_REC_M)
		return ICE_FW_MODE_REC;
	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
		return ICE_FW_MODE_ROLLBACK;
	else
		return ICE_FW_MODE_NORMAL;
}

/**
 * ice_aq_read_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
 *	    bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
enum ice_status
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	enum ice_status status;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return ICE_ERR_PARAM;

	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;

	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		/* copy the read bytes out of the response descriptor */
		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}

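/* Illustrative usage sketch (not part of the driver): reading two bytes
 * from an I2C device that sits behind a link-topology node. The topology
 * address and 7-bit bus address are made-up values; the data size is
 * packed into params with the same mask/shift the function uses to
 * extract it.
 *
 *	struct ice_aqc_link_topo_addr topo = { 0 }; // filled in by caller
 *	u8 bytes[2] = { 0 };
 *	u8 params = (2 << ICE_AQC_I2C_DATA_SIZE_S) & ICE_AQC_I2C_DATA_SIZE_M;
 *	enum ice_status status;
 *
 *	status = ice_aq_read_i2c(hw, topo, 0x50, CPU_TO_LE16(0x00), params,
 *				 bytes, NULL);
 */
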
/**
 * ice_aq_write_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 */
enum ice_status
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 i, data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;

	/* data_size limited to 4 */
	if (data_size > 4)
		return ICE_ERR_PARAM;

	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	for (i = 0; i < data_size; i++) {
		cmd->i2c_data[i] = *data;
		data++;
	}

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
enum ice_status
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	/* the handle is a little-endian field in the descriptor */
	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO to read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
enum ice_status
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	/* the handle is a little-endian field in the descriptor */
	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return ICE_SUCCESS;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = CPU_TO_LE16(buf_size);

	cmd->type = mib_type;
	cmd->length = CPU_TO_LE16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
		return true;
	}

	return false;
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = CPU_TO_LE16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		return true;
	}

	return false;
}