1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2021 Intel Corporation
5 #include "ice_common.h"
7 #include "ice_adminq_cmd.h"
10 #include "ice_switch.h"
12 #define ICE_PF_RESET_WAIT_COUNT 300
15 * ice_set_mac_type - Sets MAC type
16 * @hw: pointer to the HW structure
18 * This function sets the MAC type of the adapter based on the
19 * vendor ID and device ID stored in the HW structure.
21 static enum ice_status ice_set_mac_type(struct ice_hw *hw)
23 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
25 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
26 return ICE_ERR_DEVICE_NOT_SUPPORTED;
28 switch (hw->device_id) {
29 case ICE_DEV_ID_E810C_BACKPLANE:
30 case ICE_DEV_ID_E810C_QSFP:
31 case ICE_DEV_ID_E810C_SFP:
32 case ICE_DEV_ID_E810_XXV_BACKPLANE:
33 case ICE_DEV_ID_E810_XXV_QSFP:
34 case ICE_DEV_ID_E810_XXV_SFP:
35 hw->mac_type = ICE_MAC_E810;
37 case ICE_DEV_ID_E822C_10G_BASE_T:
38 case ICE_DEV_ID_E822C_BACKPLANE:
39 case ICE_DEV_ID_E822C_QSFP:
40 case ICE_DEV_ID_E822C_SFP:
41 case ICE_DEV_ID_E822C_SGMII:
42 case ICE_DEV_ID_E822L_10G_BASE_T:
43 case ICE_DEV_ID_E822L_BACKPLANE:
44 case ICE_DEV_ID_E822L_SFP:
45 case ICE_DEV_ID_E822L_SGMII:
46 case ICE_DEV_ID_E823L_10G_BASE_T:
47 case ICE_DEV_ID_E823L_1GBE:
48 case ICE_DEV_ID_E823L_BACKPLANE:
49 case ICE_DEV_ID_E823L_QSFP:
50 case ICE_DEV_ID_E823L_SFP:
51 hw->mac_type = ICE_MAC_GENERIC;
54 hw->mac_type = ICE_MAC_UNKNOWN;
58 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
63 * ice_clear_pf_cfg - Clear PF configuration
64 * @hw: pointer to the hardware structure
66 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
67 * configuration, flow director filters, etc.).
69 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
71 struct ice_aq_desc desc;
73 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
75 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
79 * ice_aq_manage_mac_read - manage MAC address read command
80 * @hw: pointer to the HW struct
81 * @buf: a virtual buffer to hold the manage MAC read response
82 * @buf_size: Size of the virtual buffer
83 * @cd: pointer to command details structure or NULL
85 * This function is used to return per PF station MAC address (0x0107).
86 * NOTE: Upon successful completion of this command, MAC address information
87 * is returned in user specified buffer. Please interpret user specified
88 * buffer as "manage_mac_read" response.
89 * Response such as various MAC addresses are stored in HW struct (port.mac)
90 * ice_discover_dev_caps is expected to be called before this function is
93 static enum ice_status
94 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
97 struct ice_aqc_manage_mac_read_resp *resp;
98 struct ice_aqc_manage_mac_read *cmd;
99 struct ice_aq_desc desc;
100 enum ice_status status;
104 cmd = &desc.params.mac_read;
106 if (buf_size < sizeof(*resp))
107 return ICE_ERR_BUF_TOO_SHORT;
109 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
111 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
115 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
116 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
118 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
119 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
123 /* A single port can report up to two (LAN and WoL) addresses */
124 for (i = 0; i < cmd->num_addr; i++)
125 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
126 ice_memcpy(hw->port_info->mac.lan_addr,
127 resp[i].mac_addr, ETH_ALEN,
129 ice_memcpy(hw->port_info->mac.perm_addr,
131 ETH_ALEN, ICE_DMA_TO_NONDMA);
138 * ice_aq_get_phy_caps - returns PHY capabilities
139 * @pi: port information structure
140 * @qual_mods: report qualified modules
141 * @report_mode: report mode capabilities
142 * @pcaps: structure for PHY capabilities to be filled
143 * @cd: pointer to command details structure or NULL
145 * Returns the various PHY capabilities supported on the Port (0x0600)
148 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
149 struct ice_aqc_get_phy_caps_data *pcaps,
150 struct ice_sq_cd *cd)
152 struct ice_aqc_get_phy_caps *cmd;
153 u16 pcaps_size = sizeof(*pcaps);
154 struct ice_aq_desc desc;
155 enum ice_status status;
158 cmd = &desc.params.get_phy;
160 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
161 return ICE_ERR_PARAM;
164 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
165 !ice_fw_supports_report_dflt_cfg(hw))
166 return ICE_ERR_PARAM;
168 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
171 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
173 cmd->param0 |= CPU_TO_LE16(report_mode);
174 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
176 ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
178 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
179 (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
180 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
181 (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
182 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
183 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
184 pcaps->low_power_ctrl_an);
185 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
186 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
188 ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
189 pcaps->link_fec_options);
190 ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
191 pcaps->module_compliance_enforcement);
192 ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
193 pcaps->extended_compliance_code);
194 ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
195 pcaps->module_type[0]);
196 ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
197 pcaps->module_type[1]);
198 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
199 pcaps->module_type[2]);
201 if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
202 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
203 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
204 ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
205 sizeof(pi->phy.link_info.module_type),
206 ICE_NONDMA_TO_NONDMA);
213 * ice_aq_get_link_topo_handle - get link topology node return status
214 * @pi: port information structure
215 * @node_type: requested node type
216 * @cd: pointer to command details structure or NULL
218 * Get link topology node return status for specified node type (0x06E0)
220 * Node type cage can be used to determine if cage is present. If AQC
221 * returns error (ENOENT), then no cage present. If no cage present, then
222 * connection type is backplane or BASE-T.
224 static enum ice_status
225 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
226 struct ice_sq_cd *cd)
228 struct ice_aqc_get_link_topo *cmd;
229 struct ice_aq_desc desc;
231 cmd = &desc.params.get_link_topo;
233 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
235 cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
236 ICE_AQC_LINK_TOPO_NODE_CTX_S);
239 cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
241 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
245 * ice_is_media_cage_present
246 * @pi: port information structure
248 * Returns true if media cage is present, else false. If no cage, then
249 * media type is backplane or BASE-T.
251 static bool ice_is_media_cage_present(struct ice_port_info *pi)
253 /* Node type cage can be used to determine if cage is present. If AQC
254 * returns error (ENOENT), then no cage present. If no cage present then
255 * connection type is backplane or BASE-T.
257 return !ice_aq_get_link_topo_handle(pi,
258 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
263 * ice_get_media_type - Gets media type
264 * @pi: port information structure
266 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
268 struct ice_link_status *hw_link_info;
271 return ICE_MEDIA_UNKNOWN;
273 hw_link_info = &pi->phy.link_info;
274 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
275 /* If more than one media type is selected, report unknown */
276 return ICE_MEDIA_UNKNOWN;
278 if (hw_link_info->phy_type_low) {
279 /* 1G SGMII is a special case where some DA cable PHYs
280 * may show this as an option when it really shouldn't
281 * be since SGMII is meant to be between a MAC and a PHY
282 * in a backplane. Try to detect this case and handle it
284 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
285 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
286 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
287 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
288 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
291 switch (hw_link_info->phy_type_low) {
292 case ICE_PHY_TYPE_LOW_1000BASE_SX:
293 case ICE_PHY_TYPE_LOW_1000BASE_LX:
294 case ICE_PHY_TYPE_LOW_10GBASE_SR:
295 case ICE_PHY_TYPE_LOW_10GBASE_LR:
296 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
297 case ICE_PHY_TYPE_LOW_25GBASE_SR:
298 case ICE_PHY_TYPE_LOW_25GBASE_LR:
299 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
300 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
301 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
302 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
303 case ICE_PHY_TYPE_LOW_50GBASE_SR:
304 case ICE_PHY_TYPE_LOW_50GBASE_FR:
305 case ICE_PHY_TYPE_LOW_50GBASE_LR:
306 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
307 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
308 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
309 case ICE_PHY_TYPE_LOW_100GBASE_DR:
310 return ICE_MEDIA_FIBER;
311 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
312 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
313 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
314 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
315 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
316 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
317 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
318 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
319 return ICE_MEDIA_FIBER;
320 case ICE_PHY_TYPE_LOW_100BASE_TX:
321 case ICE_PHY_TYPE_LOW_1000BASE_T:
322 case ICE_PHY_TYPE_LOW_2500BASE_T:
323 case ICE_PHY_TYPE_LOW_5GBASE_T:
324 case ICE_PHY_TYPE_LOW_10GBASE_T:
325 case ICE_PHY_TYPE_LOW_25GBASE_T:
326 return ICE_MEDIA_BASET;
327 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
328 case ICE_PHY_TYPE_LOW_25GBASE_CR:
329 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
330 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
331 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
332 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
333 case ICE_PHY_TYPE_LOW_50GBASE_CP:
334 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
335 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
336 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
338 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
339 case ICE_PHY_TYPE_LOW_40G_XLAUI:
340 case ICE_PHY_TYPE_LOW_50G_LAUI2:
341 case ICE_PHY_TYPE_LOW_50G_AUI2:
342 case ICE_PHY_TYPE_LOW_50G_AUI1:
343 case ICE_PHY_TYPE_LOW_100G_AUI4:
344 case ICE_PHY_TYPE_LOW_100G_CAUI4:
345 if (ice_is_media_cage_present(pi))
346 return ICE_MEDIA_AUI;
348 case ICE_PHY_TYPE_LOW_1000BASE_KX:
349 case ICE_PHY_TYPE_LOW_2500BASE_KX:
350 case ICE_PHY_TYPE_LOW_2500BASE_X:
351 case ICE_PHY_TYPE_LOW_5GBASE_KR:
352 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
353 case ICE_PHY_TYPE_LOW_25GBASE_KR:
354 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
355 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
356 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
357 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
358 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
359 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
360 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
361 return ICE_MEDIA_BACKPLANE;
364 switch (hw_link_info->phy_type_high) {
365 case ICE_PHY_TYPE_HIGH_100G_AUI2:
366 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
367 if (ice_is_media_cage_present(pi))
368 return ICE_MEDIA_AUI;
370 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
371 return ICE_MEDIA_BACKPLANE;
372 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
373 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
374 return ICE_MEDIA_FIBER;
377 return ICE_MEDIA_UNKNOWN;
381 * ice_aq_get_link_info
382 * @pi: port information structure
383 * @ena_lse: enable/disable LinkStatusEvent reporting
384 * @link: pointer to link status structure - optional
385 * @cd: pointer to command details structure or NULL
387 * Get Link Status (0x607). Returns the link status of the adapter.
390 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
391 struct ice_link_status *link, struct ice_sq_cd *cd)
393 struct ice_aqc_get_link_status_data link_data = { 0 };
394 struct ice_aqc_get_link_status *resp;
395 struct ice_link_status *li_old, *li;
396 enum ice_media_type *hw_media_type;
397 struct ice_fc_info *hw_fc_info;
398 bool tx_pause, rx_pause;
399 struct ice_aq_desc desc;
400 enum ice_status status;
405 return ICE_ERR_PARAM;
407 li_old = &pi->phy.link_info_old;
408 hw_media_type = &pi->phy.media_type;
409 li = &pi->phy.link_info;
410 hw_fc_info = &pi->fc;
412 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
413 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
414 resp = &desc.params.get_link_status;
415 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
416 resp->lport_num = pi->lport;
418 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
420 if (status != ICE_SUCCESS)
423 /* save off old link status information */
426 /* update current link status information */
427 li->link_speed = LE16_TO_CPU(link_data.link_speed);
428 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
429 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
430 *hw_media_type = ice_get_media_type(pi);
431 li->link_info = link_data.link_info;
432 li->link_cfg_err = link_data.link_cfg_err;
433 li->an_info = link_data.an_info;
434 li->ext_info = link_data.ext_info;
435 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
436 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
437 li->topo_media_conflict = link_data.topo_media_conflict;
438 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
439 ICE_AQ_CFG_PACING_TYPE_M);
442 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
443 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
444 if (tx_pause && rx_pause)
445 hw_fc_info->current_mode = ICE_FC_FULL;
447 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
449 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
451 hw_fc_info->current_mode = ICE_FC_NONE;
453 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
455 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
456 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
457 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
458 (unsigned long long)li->phy_type_low);
459 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
460 (unsigned long long)li->phy_type_high);
461 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
462 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
463 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
464 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
465 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
466 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
467 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
469 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
471 /* save link status information */
475 /* flag cleared so calling functions don't call AQ again */
476 pi->phy.get_link_info = false;
482 * ice_fill_tx_timer_and_fc_thresh
483 * @hw: pointer to the HW struct
484 * @cmd: pointer to MAC cfg structure
486 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
490 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
491 struct ice_aqc_set_mac_cfg *cmd)
493 u16 fc_thres_val, tx_timer_val;
496 /* We read back the transmit timer and fc threshold value of
497 * LFC. Thus, we will use index =
498 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
500 * Also, because we are opearating on transmit timer and fc
501 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
503 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
505 /* Retrieve the transmit timer */
506 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
508 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
509 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
511 /* Retrieve the fc threshold */
512 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
513 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
515 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
520 * @hw: pointer to the HW struct
521 * @max_frame_size: Maximum Frame Size to be supported
522 * @cd: pointer to command details structure or NULL
524 * Set MAC configuration (0x0603)
527 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
529 struct ice_aqc_set_mac_cfg *cmd;
530 struct ice_aq_desc desc;
532 cmd = &desc.params.set_mac_cfg;
534 if (max_frame_size == 0)
535 return ICE_ERR_PARAM;
537 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
539 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
541 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
543 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
547 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
548 * @hw: pointer to the HW struct
550 enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
552 struct ice_switch_info *sw;
553 enum ice_status status;
555 hw->switch_info = (struct ice_switch_info *)
556 ice_malloc(hw, sizeof(*hw->switch_info));
558 sw = hw->switch_info;
561 return ICE_ERR_NO_MEMORY;
563 INIT_LIST_HEAD(&sw->vsi_list_map_head);
564 sw->prof_res_bm_init = 0;
566 status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
568 ice_free(hw, hw->switch_info);
575 * ice_cleanup_fltr_mgmt_single - clears single filter mngt struct
576 * @hw: pointer to the HW struct
577 * @sw: pointer to switch info struct for which function clears filters
580 ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
582 struct ice_vsi_list_map_info *v_pos_map;
583 struct ice_vsi_list_map_info *v_tmp_map;
584 struct ice_sw_recipe *recps;
590 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
591 ice_vsi_list_map_info, list_entry) {
592 LIST_DEL(&v_pos_map->list_entry);
593 ice_free(hw, v_pos_map);
595 recps = sw->recp_list;
596 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
597 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
599 recps[i].root_rid = i;
600 LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
601 &recps[i].rg_list, ice_recp_grp_entry,
603 LIST_DEL(&rg_entry->l_entry);
604 ice_free(hw, rg_entry);
607 if (recps[i].adv_rule) {
608 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
609 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
611 ice_destroy_lock(&recps[i].filt_rule_lock);
612 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
613 &recps[i].filt_rules,
614 ice_adv_fltr_mgmt_list_entry,
616 LIST_DEL(&lst_itr->list_entry);
617 ice_free(hw, lst_itr->lkups);
618 ice_free(hw, lst_itr);
621 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
623 ice_destroy_lock(&recps[i].filt_rule_lock);
624 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
625 &recps[i].filt_rules,
626 ice_fltr_mgmt_list_entry,
628 LIST_DEL(&lst_itr->list_entry);
629 ice_free(hw, lst_itr);
632 if (recps[i].root_buf)
633 ice_free(hw, recps[i].root_buf);
635 ice_rm_sw_replay_rule_info(hw, sw);
636 ice_free(hw, sw->recp_list);
641 * ice_cleanup_all_fltr_mgmt - cleanup filter management list and locks
642 * @hw: pointer to the HW struct
644 void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
646 ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
650 * ice_get_itr_intrl_gran
651 * @hw: pointer to the HW struct
653 * Determines the ITR/INTRL granularities based on the maximum aggregate
654 * bandwidth according to the device's configuration during power-on.
656 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
658 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
659 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
660 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
662 switch (max_agg_bw) {
663 case ICE_MAX_AGG_BW_200G:
664 case ICE_MAX_AGG_BW_100G:
665 case ICE_MAX_AGG_BW_50G:
666 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
667 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
669 case ICE_MAX_AGG_BW_25G:
670 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
671 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
677 * ice_print_rollback_msg - print FW rollback message
678 * @hw: pointer to the hardware structure
680 void ice_print_rollback_msg(struct ice_hw *hw)
682 char nvm_str[ICE_NVM_VER_LEN] = { 0 };
683 struct ice_orom_info *orom;
684 struct ice_nvm_info *nvm;
686 orom = &hw->flash.orom;
687 nvm = &hw->flash.nvm;
689 SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
690 nvm->major, nvm->minor, nvm->eetrack, orom->major,
691 orom->build, orom->patch);
693 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
694 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
698 * ice_init_hw - main hardware initialization routine
699 * @hw: pointer to the hardware structure
701 enum ice_status ice_init_hw(struct ice_hw *hw)
703 struct ice_aqc_get_phy_caps_data *pcaps;
704 enum ice_status status;
708 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
710 /* Set MAC type based on DeviceID */
711 status = ice_set_mac_type(hw);
715 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
716 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
717 PF_FUNC_RID_FUNCTION_NUMBER_S;
719 status = ice_reset(hw, ICE_RESET_PFR);
723 ice_get_itr_intrl_gran(hw);
725 status = ice_create_all_ctrlq(hw);
727 goto err_unroll_cqinit;
729 status = ice_init_nvm(hw);
731 goto err_unroll_cqinit;
733 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
734 ice_print_rollback_msg(hw);
736 status = ice_clear_pf_cfg(hw);
738 goto err_unroll_cqinit;
740 /* Set bit to enable Flow Director filters */
741 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
742 INIT_LIST_HEAD(&hw->fdir_list_head);
744 ice_clear_pxe_mode(hw);
746 status = ice_get_caps(hw);
748 goto err_unroll_cqinit;
750 hw->port_info = (struct ice_port_info *)
751 ice_malloc(hw, sizeof(*hw->port_info));
752 if (!hw->port_info) {
753 status = ICE_ERR_NO_MEMORY;
754 goto err_unroll_cqinit;
757 /* set the back pointer to HW */
758 hw->port_info->hw = hw;
760 /* Initialize port_info struct with switch configuration data */
761 status = ice_get_initial_sw_cfg(hw);
763 goto err_unroll_alloc;
766 /* Query the allocated resources for Tx scheduler */
767 status = ice_sched_query_res_alloc(hw);
769 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
770 goto err_unroll_alloc;
772 ice_sched_get_psm_clk_freq(hw);
774 /* Initialize port_info struct with scheduler data */
775 status = ice_sched_init_port(hw->port_info);
777 goto err_unroll_sched;
778 pcaps = (struct ice_aqc_get_phy_caps_data *)
779 ice_malloc(hw, sizeof(*pcaps));
781 status = ICE_ERR_NO_MEMORY;
782 goto err_unroll_sched;
785 /* Initialize port_info struct with PHY capabilities */
786 status = ice_aq_get_phy_caps(hw->port_info, false,
787 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
790 ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
793 /* Initialize port_info struct with link information */
794 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
796 goto err_unroll_sched;
797 /* need a valid SW entry point to build a Tx tree */
798 if (!hw->sw_entry_point_layer) {
799 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
800 status = ICE_ERR_CFG;
801 goto err_unroll_sched;
803 INIT_LIST_HEAD(&hw->agg_list);
804 /* Initialize max burst size */
805 if (!hw->max_burst_size)
806 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
807 status = ice_init_fltr_mgmt_struct(hw);
809 goto err_unroll_sched;
811 /* Get MAC information */
812 /* A single port can report up to two (LAN and WoL) addresses */
813 mac_buf = ice_calloc(hw, 2,
814 sizeof(struct ice_aqc_manage_mac_read_resp));
815 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
818 status = ICE_ERR_NO_MEMORY;
819 goto err_unroll_fltr_mgmt_struct;
822 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
823 ice_free(hw, mac_buf);
826 goto err_unroll_fltr_mgmt_struct;
827 /* enable jumbo frame support at MAC level */
828 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
830 goto err_unroll_fltr_mgmt_struct;
831 /* Obtain counter base index which would be used by flow director */
832 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
834 goto err_unroll_fltr_mgmt_struct;
835 status = ice_init_hw_tbls(hw);
837 goto err_unroll_fltr_mgmt_struct;
838 ice_init_lock(&hw->tnl_lock);
842 err_unroll_fltr_mgmt_struct:
843 ice_cleanup_fltr_mgmt_struct(hw);
845 ice_sched_cleanup_all(hw);
847 ice_free(hw, hw->port_info);
848 hw->port_info = NULL;
850 ice_destroy_all_ctrlq(hw);
855 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
856 * @hw: pointer to the hardware structure
858 * This should be called only during nominal operation, not as a result of
859 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
860 * applicable initializations if it fails for any reason.
862 void ice_deinit_hw(struct ice_hw *hw)
864 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
865 ice_cleanup_fltr_mgmt_struct(hw);
867 ice_sched_cleanup_all(hw);
868 ice_sched_clear_agg(hw);
870 ice_free_hw_tbls(hw);
871 ice_destroy_lock(&hw->tnl_lock);
874 ice_free(hw, hw->port_info);
875 hw->port_info = NULL;
878 ice_destroy_all_ctrlq(hw);
880 /* Clear VSI contexts if not already cleared */
881 ice_clear_all_vsi_ctx(hw);
885 * ice_check_reset - Check to see if a global reset is complete
886 * @hw: pointer to the hardware structure
888 enum ice_status ice_check_reset(struct ice_hw *hw)
890 u32 cnt, reg = 0, grst_timeout, uld_mask;
892 /* Poll for Device Active state in case a recent CORER, GLOBR,
893 * or EMPR has occurred. The grst delay value is in 100ms units.
894 * Add 1sec for outstanding AQ commands that can take a long time.
896 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
897 GLGEN_RSTCTL_GRSTDEL_S) + 10;
899 for (cnt = 0; cnt < grst_timeout; cnt++) {
900 ice_msec_delay(100, true);
901 reg = rd32(hw, GLGEN_RSTAT);
902 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
906 if (cnt == grst_timeout) {
907 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
908 return ICE_ERR_RESET_FAILED;
911 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
912 GLNVM_ULD_PCIER_DONE_1_M |\
913 GLNVM_ULD_CORER_DONE_M |\
914 GLNVM_ULD_GLOBR_DONE_M |\
915 GLNVM_ULD_POR_DONE_M |\
916 GLNVM_ULD_POR_DONE_1_M |\
917 GLNVM_ULD_PCIER_DONE_2_M)
919 uld_mask = ICE_RESET_DONE_MASK;
921 /* Device is Active; check Global Reset processes are done */
922 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
923 reg = rd32(hw, GLNVM_ULD) & uld_mask;
924 if (reg == uld_mask) {
925 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
928 ice_msec_delay(10, true);
931 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
932 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
934 return ICE_ERR_RESET_FAILED;
941 * ice_pf_reset - Reset the PF
942 * @hw: pointer to the hardware structure
944 * If a global reset has been triggered, this function checks
945 * for its completion and then issues the PF reset
947 static enum ice_status ice_pf_reset(struct ice_hw *hw)
951 /* If at function entry a global reset was already in progress, i.e.
952 * state is not 'device active' or any of the reset done bits are not
953 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
954 * global reset is done.
956 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
957 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
958 /* poll on global reset currently in progress until done */
959 if (ice_check_reset(hw))
960 return ICE_ERR_RESET_FAILED;
966 reg = rd32(hw, PFGEN_CTRL);
968 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
970 /* Wait for the PFR to complete. The wait time is the global config lock
971 * timeout plus the PFR timeout which will account for a possible reset
972 * that is occurring during a download package operation.
974 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
975 ICE_PF_RESET_WAIT_COUNT; cnt++) {
976 reg = rd32(hw, PFGEN_CTRL);
977 if (!(reg & PFGEN_CTRL_PFSWR_M))
980 ice_msec_delay(1, true);
983 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
984 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
985 return ICE_ERR_RESET_FAILED;
992 * ice_reset - Perform different types of reset
993 * @hw: pointer to the hardware structure
994 * @req: reset request
996 * This function triggers a reset as specified by the req parameter.
999 * If anything other than a PF reset is triggered, PXE mode is restored.
1000 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1001 * interface has been restored in the rebuild flow.
1003 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1009 return ice_pf_reset(hw);
1010 case ICE_RESET_CORER:
1011 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1012 val = GLGEN_RTRIG_CORER_M;
1014 case ICE_RESET_GLOBR:
1015 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1016 val = GLGEN_RTRIG_GLOBR_M;
1019 return ICE_ERR_PARAM;
1022 val |= rd32(hw, GLGEN_RTRIG);
1023 wr32(hw, GLGEN_RTRIG, val);
1026 /* wait for the FW to be ready */
1027 return ice_check_reset(hw);
1031 * ice_copy_rxq_ctx_to_hw
1032 * @hw: pointer to the hardware structure
1033 * @ice_rxq_ctx: pointer to the rxq context
1034 * @rxq_index: the index of the Rx queue
1036 * Copies rxq context from dense structure to HW register space
1038 static enum ice_status
1039 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1044 return ICE_ERR_BAD_PTR;
1046 if (rxq_index > QRX_CTRL_MAX_INDEX)
1047 return ICE_ERR_PARAM;
1049 /* Copy each dword separately to HW */
1050 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1051 wr32(hw, QRX_CONTEXT(i, rxq_index),
1052 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1054 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1055 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1061 /* LAN Rx Queue Context */
1062 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1063 /* Field Width LSB */
1064 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1065 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1066 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1067 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1068 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1069 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1070 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1071 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1072 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1073 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1074 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1075 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1076 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1077 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1078 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1079 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1080 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1081 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1082 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1083 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1089 * @hw: pointer to the hardware structure
1090 * @rlan_ctx: pointer to the rxq context
1091 * @rxq_index: the index of the Rx queue
1093 * Converts rxq context from sparse to dense structure and then writes
1094 * it to HW register space and enables the hardware to prefetch descriptors
1095 * instead of only fetching them on demand
1098 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1101 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1104 return ICE_ERR_BAD_PTR;
1106 rlan_ctx->prefena = 1;
1108 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1109 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1114 * @hw: pointer to the hardware structure
1115 * @rxq_index: the index of the Rx queue to clear
1117 * Clears rxq context in HW register space
1119 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1123 if (rxq_index > QRX_CTRL_MAX_INDEX)
1124 return ICE_ERR_PARAM;
1126 /* Clear each dword register separately */
1127 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1128 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1133 /* LAN Tx Queue Context */
1134 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1135 /* Field Width LSB */
1136 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1137 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1138 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1139 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1140 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1141 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1142 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1143 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1144 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1145 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1146 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1147 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1148 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1149 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1150 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1151 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1152 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1153 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1154 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1155 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1156 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1157 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1158 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1159 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1160 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1161 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1162 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1163 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1168 * ice_copy_tx_cmpltnq_ctx_to_hw
1169 * @hw: pointer to the hardware structure
1170 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1171 * @tx_cmpltnq_index: the index of the completion queue
1173 * Copies Tx completion queue context from dense structure to HW register space
1175 static enum ice_status
1176 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1177 u32 tx_cmpltnq_index)
1181 if (!ice_tx_cmpltnq_ctx)
1182 return ICE_ERR_BAD_PTR;
1184 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1185 return ICE_ERR_PARAM;
1187 /* Copy each dword separately to HW */
1188 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1189 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1190 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1192 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1193 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1199 /* LAN Tx Completion Queue Context */
1200 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1201 /* Field Width LSB */
1202 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1203 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1204 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1205 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1206 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1207 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1208 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1209 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1210 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1211 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1216 * ice_write_tx_cmpltnq_ctx
1217 * @hw: pointer to the hardware structure
1218 * @tx_cmpltnq_ctx: pointer to the completion queue context
1219 * @tx_cmpltnq_index: the index of the completion queue
1221 * Converts completion queue context from sparse to dense structure and then
1222 * writes it to HW register space
1225 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1226 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1227 u32 tx_cmpltnq_index)
1229 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1231 ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1232 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1236 * ice_clear_tx_cmpltnq_ctx
1237 * @hw: pointer to the hardware structure
1238 * @tx_cmpltnq_index: the index of the completion queue to clear
1240 * Clears Tx completion queue context in HW register space
1243 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1247 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1248 return ICE_ERR_PARAM;
1250 /* Clear each dword register separately */
1251 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1252 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1258 * ice_copy_tx_drbell_q_ctx_to_hw
1259 * @hw: pointer to the hardware structure
1260 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1261 * @tx_drbell_q_index: the index of the doorbell queue
1263 * Copies doorbell queue context from dense structure to HW register space
1265 static enum ice_status
1266 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1267 u32 tx_drbell_q_index)
1271 if (!ice_tx_drbell_q_ctx)
1272 return ICE_ERR_BAD_PTR;
1274 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1275 return ICE_ERR_PARAM;
1277 /* Copy each dword separately to HW */
1278 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1279 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1280 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1282 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1283 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1289 /* LAN Tx Doorbell Queue Context info */
1290 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1291 /* Field Width LSB */
1292 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1293 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1294 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1295 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1296 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1297 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1298 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1299 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1300 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1301 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1302 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1307 * ice_write_tx_drbell_q_ctx
1308 * @hw: pointer to the hardware structure
1309 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1310 * @tx_drbell_q_index: the index of the doorbell queue
1312 * Converts doorbell queue context from sparse to dense structure and then
1313 * writes it to HW register space
1316 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1317 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1318 u32 tx_drbell_q_index)
1320 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1322 ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1323 ice_tx_drbell_q_ctx_info);
1324 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1328 * ice_clear_tx_drbell_q_ctx
1329 * @hw: pointer to the hardware structure
1330 * @tx_drbell_q_index: the index of the doorbell queue to clear
1332 * Clears doorbell queue context in HW register space
1335 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1339 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1340 return ICE_ERR_PARAM;
1342 /* Clear each dword register separately */
1343 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1344 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1349 /* FW Admin Queue command wrappers */
1352 * ice_should_retry_sq_send_cmd
1353 * @opcode: AQ opcode
1355 * Decide if we should retry the send command routine for the ATQ, depending
1358 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1361 case ice_aqc_opc_get_link_topo:
1362 case ice_aqc_opc_lldp_stop:
1363 case ice_aqc_opc_lldp_start:
1364 case ice_aqc_opc_lldp_filter_ctrl:
1372 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1373 * @hw: pointer to the HW struct
1374 * @cq: pointer to the specific Control queue
1375 * @desc: prefilled descriptor describing the command
1376 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1377 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1378 * @cd: pointer to command details structure
1380 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1381 * Queue if the EBUSY AQ error is returned.
1383 static enum ice_status
1384 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1385 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1386 struct ice_sq_cd *cd)
1388 struct ice_aq_desc desc_cpy;
1389 enum ice_status status;
1390 bool is_cmd_for_retry;
1395 opcode = LE16_TO_CPU(desc->opcode);
1396 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1397 ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
1399 if (is_cmd_for_retry) {
1401 buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1403 return ICE_ERR_NO_MEMORY;
1406 ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1407 ICE_NONDMA_TO_NONDMA);
1411 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1413 if (!is_cmd_for_retry || status == ICE_SUCCESS ||
1414 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1418 ice_memcpy(buf, buf_cpy, buf_size,
1419 ICE_NONDMA_TO_NONDMA);
1421 ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1422 ICE_NONDMA_TO_NONDMA);
1424 ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1426 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1429 ice_free(hw, buf_cpy);
1435 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1436 * @hw: pointer to the HW struct
1437 * @desc: descriptor describing the command
1438 * @buf: buffer to use for indirect commands (NULL for direct commands)
1439 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1440 * @cd: pointer to command details structure
1442 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1445 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1446 u16 buf_size, struct ice_sq_cd *cd)
1448 if (hw->aq_send_cmd_fn) {
1449 enum ice_status status = ICE_ERR_NOT_READY;
1450 u16 retval = ICE_AQ_RC_OK;
1452 ice_acquire_lock(&hw->adminq.sq_lock);
1453 if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc,
1455 retval = LE16_TO_CPU(desc->retval);
1456 /* strip off FW internal code */
1459 if (retval == ICE_AQ_RC_OK)
1460 status = ICE_SUCCESS;
1462 status = ICE_ERR_AQ_ERROR;
1465 hw->adminq.sq_last_status = (enum ice_aq_err)retval;
1466 ice_release_lock(&hw->adminq.sq_lock);
1470 return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1475 * @hw: pointer to the HW struct
1476 * @cd: pointer to command details structure or NULL
1478 * Get the firmware version (0x0001) from the admin queue commands
1480 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1482 struct ice_aqc_get_ver *resp;
1483 struct ice_aq_desc desc;
1484 enum ice_status status;
1486 resp = &desc.params.get_ver;
1488 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1490 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1493 hw->fw_branch = resp->fw_branch;
1494 hw->fw_maj_ver = resp->fw_major;
1495 hw->fw_min_ver = resp->fw_minor;
1496 hw->fw_patch = resp->fw_patch;
1497 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1498 hw->api_branch = resp->api_branch;
1499 hw->api_maj_ver = resp->api_major;
1500 hw->api_min_ver = resp->api_minor;
1501 hw->api_patch = resp->api_patch;
1508 * ice_aq_send_driver_ver
1509 * @hw: pointer to the HW struct
1510 * @dv: driver's major, minor version
1511 * @cd: pointer to command details structure or NULL
1513 * Send the driver version (0x0002) to the firmware
1516 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1517 struct ice_sq_cd *cd)
1519 struct ice_aqc_driver_ver *cmd;
1520 struct ice_aq_desc desc;
1523 cmd = &desc.params.driver_ver;
1526 return ICE_ERR_PARAM;
1528 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1530 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1531 cmd->major_ver = dv->major_ver;
1532 cmd->minor_ver = dv->minor_ver;
1533 cmd->build_ver = dv->build_ver;
1534 cmd->subbuild_ver = dv->subbuild_ver;
1537 while (len < sizeof(dv->driver_string) &&
1538 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1541 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1546 * @hw: pointer to the HW struct
1547 * @unloading: is the driver unloading itself
1549 * Tell the Firmware that we're shutting down the AdminQ and whether
1550 * or not the driver is unloading as well (0x0003).
1552 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1554 struct ice_aqc_q_shutdown *cmd;
1555 struct ice_aq_desc desc;
1557 cmd = &desc.params.q_shutdown;
1559 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1562 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1564 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1569 * @hw: pointer to the HW struct
1571 * @access: access type
1572 * @sdp_number: resource number
1573 * @timeout: the maximum time in ms that the driver may hold the resource
1574 * @cd: pointer to command details structure or NULL
1576 * Requests common resource using the admin queue commands (0x0008).
1577 * When attempting to acquire the Global Config Lock, the driver can
1578 * learn of three states:
1579 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1580 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1581 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1582 * successfully downloaded the package; the driver does
1583 * not have to download the package and can continue
1586 * Note that if the caller is in an acquire lock, perform action, release lock
1587 * phase of operation, it is possible that the FW may detect a timeout and issue
1588 * a CORER. In this case, the driver will receive a CORER interrupt and will
1589 * have to determine its cause. The calling thread that is handling this flow
1590 * will likely get an error propagated back to it indicating the Download
1591 * Package, Update Package or the Release Resource AQ commands timed out.
1593 static enum ice_status
1594 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1595 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1596 struct ice_sq_cd *cd)
1598 struct ice_aqc_req_res *cmd_resp;
1599 struct ice_aq_desc desc;
1600 enum ice_status status;
1602 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1604 cmd_resp = &desc.params.res_owner;
1606 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1608 cmd_resp->res_id = CPU_TO_LE16(res);
1609 cmd_resp->access_type = CPU_TO_LE16(access);
1610 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1611 cmd_resp->timeout = CPU_TO_LE32(*timeout);
1614 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1616 /* The completion specifies the maximum time in ms that the driver
1617 * may hold the resource in the Timeout field.
1620 /* Global config lock response utilizes an additional status field.
1622 * If the Global config lock resource is held by some other driver, the
1623 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1624 * and the timeout field indicates the maximum time the current owner
1625 * of the resource has to free it.
1627 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1628 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1629 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1631 } else if (LE16_TO_CPU(cmd_resp->status) ==
1632 ICE_AQ_RES_GLBL_IN_PROG) {
1633 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1634 return ICE_ERR_AQ_ERROR;
1635 } else if (LE16_TO_CPU(cmd_resp->status) ==
1636 ICE_AQ_RES_GLBL_DONE) {
1637 return ICE_ERR_AQ_NO_WORK;
1640 /* invalid FW response, force a timeout immediately */
1642 return ICE_ERR_AQ_ERROR;
1645 /* If the resource is held by some other driver, the command completes
1646 * with a busy return value and the timeout field indicates the maximum
1647 * time the current owner of the resource has to free it.
1649 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1650 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1656 * ice_aq_release_res
1657 * @hw: pointer to the HW struct
1659 * @sdp_number: resource number
1660 * @cd: pointer to command details structure or NULL
1662 * release common resource using the admin queue commands (0x0009)
1664 static enum ice_status
1665 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1666 struct ice_sq_cd *cd)
1668 struct ice_aqc_req_res *cmd;
1669 struct ice_aq_desc desc;
1671 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1673 cmd = &desc.params.res_owner;
1675 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1677 cmd->res_id = CPU_TO_LE16(res);
1678 cmd->res_number = CPU_TO_LE32(sdp_number);
1680 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1685 * @hw: pointer to the HW structure
1687 * @access: access type (read or write)
1688 * @timeout: timeout in milliseconds
1690 * This function will attempt to acquire the ownership of a resource.
1693 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1694 enum ice_aq_res_access_type access, u32 timeout)
1696 #define ICE_RES_POLLING_DELAY_MS 10
1697 u32 delay = ICE_RES_POLLING_DELAY_MS;
1698 u32 time_left = timeout;
1699 enum ice_status status;
1701 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1703 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1705 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1706 * previously acquired the resource and performed any necessary updates;
1707 * in this case the caller does not obtain the resource and has no
1708 * further work to do.
1710 if (status == ICE_ERR_AQ_NO_WORK)
1711 goto ice_acquire_res_exit;
1714 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1716 /* If necessary, poll until the current lock owner timeouts */
1717 timeout = time_left;
1718 while (status && timeout && time_left) {
1719 ice_msec_delay(delay, true);
1720 timeout = (timeout > delay) ? timeout - delay : 0;
1721 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1723 if (status == ICE_ERR_AQ_NO_WORK)
1724 /* lock free, but no work to do */
1731 if (status && status != ICE_ERR_AQ_NO_WORK)
1732 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1734 ice_acquire_res_exit:
1735 if (status == ICE_ERR_AQ_NO_WORK) {
1736 if (access == ICE_RES_WRITE)
1737 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1739 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1746 * @hw: pointer to the HW structure
1749 * This function will release a resource using the proper Admin Command.
1751 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1753 enum ice_status status;
1754 u32 total_delay = 0;
1756 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1758 status = ice_aq_release_res(hw, res, 0, NULL);
1760 /* there are some rare cases when trying to release the resource
1761 * results in an admin queue timeout, so handle them correctly
1763 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1764 (total_delay < hw->adminq.sq_cmd_timeout)) {
1765 ice_msec_delay(1, true);
1766 status = ice_aq_release_res(hw, res, 0, NULL);
1772 * ice_aq_alloc_free_res - command to allocate/free resources
1773 * @hw: pointer to the HW struct
1774 * @num_entries: number of resource entries in buffer
1775 * @buf: Indirect buffer to hold data parameters and response
1776 * @buf_size: size of buffer for indirect commands
1777 * @opc: pass in the command opcode
1778 * @cd: pointer to command details structure or NULL
1780 * Helper function to allocate/free resources using the admin queue commands
1783 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1784 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1785 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1787 struct ice_aqc_alloc_free_res_cmd *cmd;
1788 struct ice_aq_desc desc;
1790 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1792 cmd = &desc.params.sw_res_ctrl;
1795 return ICE_ERR_PARAM;
1797 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
1798 return ICE_ERR_PARAM;
1800 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1802 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1804 cmd->num_entries = CPU_TO_LE16(num_entries);
1806 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1810 * ice_alloc_hw_res - allocate resource
1811 * @hw: pointer to the HW struct
1812 * @type: type of resource
1813 * @num: number of resources to allocate
1814 * @btm: allocate from bottom
1815 * @res: pointer to array that will receive the resources
1818 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1820 struct ice_aqc_alloc_free_res_elem *buf;
1821 enum ice_status status;
1824 buf_len = ice_struct_size(buf, elem, num);
1825 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1827 return ICE_ERR_NO_MEMORY;
1829 /* Prepare buffer to allocate resource. */
1830 buf->num_elems = CPU_TO_LE16(num);
1831 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1832 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1834 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1836 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1837 ice_aqc_opc_alloc_res, NULL);
1839 goto ice_alloc_res_exit;
1841 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
1842 ICE_NONDMA_TO_NONDMA);
1850 * ice_free_hw_res - free allocated HW resource
1851 * @hw: pointer to the HW struct
1852 * @type: type of resource to free
1853 * @num: number of resources
1854 * @res: pointer to array that contains the resources to free
1856 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1858 struct ice_aqc_alloc_free_res_elem *buf;
1859 enum ice_status status;
1862 buf_len = ice_struct_size(buf, elem, num);
1863 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1865 return ICE_ERR_NO_MEMORY;
1867 /* Prepare buffer to free resource. */
1868 buf->num_elems = CPU_TO_LE16(num);
1869 buf->res_type = CPU_TO_LE16(type);
1870 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
1871 ICE_NONDMA_TO_NONDMA);
1873 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1874 ice_aqc_opc_free_res, NULL);
1876 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1883 * ice_get_num_per_func - determine number of resources per PF
1884 * @hw: pointer to the HW structure
1885 * @max: value to be evenly split between each PF
1887 * Determine the number of valid functions by going through the bitmap returned
1888 * from parsing capabilities and use this to calculate the number of resources
1889 * per PF based on the max value passed in.
1891 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1895 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1896 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
1897 ICE_CAPS_VALID_FUNCS_M);
1906 * ice_parse_common_caps - parse common device/function capabilities
1907 * @hw: pointer to the HW struct
1908 * @caps: pointer to common capabilities structure
1909 * @elem: the capability element to parse
1910 * @prefix: message prefix for tracing capabilities
1912 * Given a capability element, extract relevant details into the common
1913 * capability structure.
1915 * Returns: true if the capability matches one of the common capability ids,
1919 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1920 struct ice_aqc_list_caps_elem *elem, const char *prefix)
1922 u32 logical_id = LE32_TO_CPU(elem->logical_id);
1923 u32 phys_id = LE32_TO_CPU(elem->phys_id);
1924 u32 number = LE32_TO_CPU(elem->number);
1925 u16 cap = LE16_TO_CPU(elem->cap);
1929 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1930 caps->valid_functions = number;
1931 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
1932 caps->valid_functions);
1934 case ICE_AQC_CAPS_DCB:
1935 caps->dcb = (number == 1);
1936 caps->active_tc_bitmap = logical_id;
1937 caps->maxtc = phys_id;
1938 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
1939 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
1940 caps->active_tc_bitmap);
1941 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
1943 case ICE_AQC_CAPS_RSS:
1944 caps->rss_table_size = number;
1945 caps->rss_table_entry_width = logical_id;
1946 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
1947 caps->rss_table_size);
1948 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
1949 caps->rss_table_entry_width);
1951 case ICE_AQC_CAPS_RXQS:
1952 caps->num_rxq = number;
1953 caps->rxq_first_id = phys_id;
1954 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
1956 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
1957 caps->rxq_first_id);
1959 case ICE_AQC_CAPS_TXQS:
1960 caps->num_txq = number;
1961 caps->txq_first_id = phys_id;
1962 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
1964 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
1965 caps->txq_first_id);
1967 case ICE_AQC_CAPS_MSIX:
1968 caps->num_msix_vectors = number;
1969 caps->msix_vector_first_id = phys_id;
1970 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
1971 caps->num_msix_vectors);
1972 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
1973 caps->msix_vector_first_id);
1975 case ICE_AQC_CAPS_NVM_MGMT:
1976 caps->sec_rev_disabled =
1977 (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
1979 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
1980 caps->sec_rev_disabled);
1981 caps->update_disabled =
1982 (number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
1984 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
1985 caps->update_disabled);
1986 caps->nvm_unified_update =
1987 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
1989 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
1990 caps->nvm_unified_update);
1992 case ICE_AQC_CAPS_MAX_MTU:
1993 caps->max_mtu = number;
1994 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1995 prefix, caps->max_mtu);
1998 /* Not one of the recognized common capabilities */
2006 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2007 * @hw: pointer to the HW structure
2008 * @caps: pointer to capabilities structure to fix
2010 * Re-calculate the capabilities that are dependent on the number of physical
2011 * ports; i.e. some features are not supported or function differently on
2012 * devices with more than 4 ports.
2015 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2017 /* This assumes device capabilities are always scanned before function
2018 * capabilities during the initialization flow.
2020 if (hw->dev_caps.num_funcs > 4) {
2021 /* Max 4 TCs per port */
2023 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2029 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2030 * @hw: pointer to the HW struct
2031 * @func_p: pointer to function capabilities structure
2032 * @cap: pointer to the capability element to parse
2034 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2037 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2038 struct ice_aqc_list_caps_elem *cap)
2040 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2041 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2042 LE32_TO_CPU(cap->number));
2043 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2044 func_p->guar_num_vsi);
2048 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2049 * @hw: pointer to the HW struct
2050 * @func_p: pointer to function capabilities structure
2052 * Extract function capabilities for ICE_AQC_CAPS_FD.
2055 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2059 if (hw->dcf_enabled)
2061 reg_val = rd32(hw, GLQF_FD_SIZE);
2062 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2063 GLQF_FD_SIZE_FD_GSIZE_S;
2064 func_p->fd_fltr_guar =
2065 ice_get_num_per_func(hw, val);
2066 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2067 GLQF_FD_SIZE_FD_BSIZE_S;
2068 func_p->fd_fltr_best_effort = val;
2070 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2071 func_p->fd_fltr_guar);
2072 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2073 func_p->fd_fltr_best_effort);
2077 * ice_parse_func_caps - Parse function capabilities
2078 * @hw: pointer to the HW struct
2079 * @func_p: pointer to function capabilities structure
2080 * @buf: buffer containing the function capability records
2081 * @cap_count: the number of capabilities
2083 * Helper function to parse function (0x000A) capabilities list. For
2084 * capabilities shared between device and function, this relies on
2085 * ice_parse_common_caps.
2087 * Loop through the list of provided capabilities and extract the relevant
2088 * data into the function capabilities structured.
2091 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2092 void *buf, u32 cap_count)
2094 struct ice_aqc_list_caps_elem *cap_resp;
2097 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2099 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2101 for (i = 0; i < cap_count; i++) {
2102 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2105 found = ice_parse_common_caps(hw, &func_p->common_cap,
2106 &cap_resp[i], "func caps");
2109 case ICE_AQC_CAPS_VSI:
2110 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2112 case ICE_AQC_CAPS_FD:
2113 ice_parse_fdir_func_caps(hw, func_p);
2116 /* Don't list common capabilities as unknown */
2118 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2124 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2128 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2129 * @hw: pointer to the HW struct
2130 * @dev_p: pointer to device capabilities structure
2131 * @cap: capability element to parse
2133 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2136 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2137 struct ice_aqc_list_caps_elem *cap)
2139 u32 number = LE32_TO_CPU(cap->number);
2141 dev_p->num_funcs = ice_hweight32(number);
2142 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2147 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2148 * @hw: pointer to the HW struct
2149 * @dev_p: pointer to device capabilities structure
2150 * @cap: capability element to parse
2152 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2155 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2156 struct ice_aqc_list_caps_elem *cap)
2158 u32 number = LE32_TO_CPU(cap->number);
2160 dev_p->num_vsi_allocd_to_host = number;
2161 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2162 dev_p->num_vsi_allocd_to_host);
2166 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2167 * @hw: pointer to the HW struct
2168 * @dev_p: pointer to device capabilities structure
2169 * @cap: capability element to parse
2171 * Parse ICE_AQC_CAPS_FD for device capabilities.
2174 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2175 struct ice_aqc_list_caps_elem *cap)
2177 u32 number = LE32_TO_CPU(cap->number);
2179 dev_p->num_flow_director_fltr = number;
2180 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2181 dev_p->num_flow_director_fltr);
2185 * ice_parse_dev_caps - Parse device capabilities
2186 * @hw: pointer to the HW struct
2187 * @dev_p: pointer to device capabilities structure
2188 * @buf: buffer containing the device capability records
2189 * @cap_count: the number of capabilities
2191 * Helper device to parse device (0x000B) capabilities list. For
2192 * capabilities shared between device and function, this relies on
2193 * ice_parse_common_caps.
2195 * Loop through the list of provided capabilities and extract the relevant
2196 * data into the device capabilities structured.
2199 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2200 void *buf, u32 cap_count)
2202 struct ice_aqc_list_caps_elem *cap_resp;
2205 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2207 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2209 for (i = 0; i < cap_count; i++) {
2210 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2213 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2214 &cap_resp[i], "dev caps");
2217 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2218 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2220 case ICE_AQC_CAPS_VSI:
2221 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2223 case ICE_AQC_CAPS_FD:
2224 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2227 /* Don't list common capabilities as unknown */
2229 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2235 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2239 * ice_aq_list_caps - query function/device capabilities
2240 * @hw: pointer to the HW struct
2241 * @buf: a buffer to hold the capabilities
2242 * @buf_size: size of the buffer
2243 * @cap_count: if not NULL, set to the number of capabilities reported
2244 * @opc: capabilities type to discover, device or function
2245 * @cd: pointer to command details structure or NULL
2247 * Get the function (0x000A) or device (0x000B) capabilities description from
2248 * firmware and store it in the buffer.
2250 * If the cap_count pointer is not NULL, then it is set to the number of
2251 * capabilities firmware will report. Note that if the buffer size is too
2252 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2253 * cap_count will still be updated in this case. It is recommended that the
2254 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2255 * firmware could return) to avoid this.
2257 static enum ice_status
2258 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2259 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2261 struct ice_aqc_list_caps *cmd;
2262 struct ice_aq_desc desc;
2263 enum ice_status status;
2265 cmd = &desc.params.get_cap;
2267 if (opc != ice_aqc_opc_list_func_caps &&
2268 opc != ice_aqc_opc_list_dev_caps)
2269 return ICE_ERR_PARAM;
2271 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2272 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2275 *cap_count = LE32_TO_CPU(cmd->count);
2281 * ice_discover_dev_caps - Read and extract device capabilities
2282 * @hw: pointer to the hardware structure
2283 * @dev_caps: pointer to device capabilities structure
2285 * Read the device capabilities and extract them into the dev_caps structure
2288 static enum ice_status
2289 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2291 enum ice_status status;
2295 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2297 return ICE_ERR_NO_MEMORY;
2299 /* Although the driver doesn't know the number of capabilities the
2300 * device will return, we can simply send a 4KB buffer, the maximum
2301 * possible size that firmware can return.
2303 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2305 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2306 ice_aqc_opc_list_dev_caps, NULL);
2308 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2315 * ice_discover_func_caps - Read and extract function capabilities
2316 * @hw: pointer to the hardware structure
2317 * @func_caps: pointer to function capabilities structure
2319 * Read the function capabilities and extract them into the func_caps structure
2322 static enum ice_status
2323 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2325 enum ice_status status;
2329 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2331 return ICE_ERR_NO_MEMORY;
2333 /* Although the driver doesn't know the number of capabilities the
2334 * device will return, we can simply send a 4KB buffer, the maximum
2335 * possible size that firmware can return.
2337 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2339 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2340 ice_aqc_opc_list_func_caps, NULL);
2342 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2349 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2350 * @hw: pointer to the hardware structure
2352 void ice_set_safe_mode_caps(struct ice_hw *hw)
2354 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2355 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2356 struct ice_hw_common_caps cached_caps;
2359 /* cache some func_caps values that should be restored after memset */
2360 cached_caps = func_caps->common_cap;
2362 /* unset func capabilities */
2363 memset(func_caps, 0, sizeof(*func_caps));
2365 #define ICE_RESTORE_FUNC_CAP(name) \
2366 func_caps->common_cap.name = cached_caps.name
2368 /* restore cached values */
2369 ICE_RESTORE_FUNC_CAP(valid_functions);
2370 ICE_RESTORE_FUNC_CAP(txq_first_id);
2371 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2372 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2373 ICE_RESTORE_FUNC_CAP(max_mtu);
2374 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2376 /* one Tx and one Rx queue in safe mode */
2377 func_caps->common_cap.num_rxq = 1;
2378 func_caps->common_cap.num_txq = 1;
2380 /* two MSIX vectors, one for traffic and one for misc causes */
2381 func_caps->common_cap.num_msix_vectors = 2;
2382 func_caps->guar_num_vsi = 1;
2384 /* cache some dev_caps values that should be restored after memset */
2385 cached_caps = dev_caps->common_cap;
2386 num_funcs = dev_caps->num_funcs;
2388 /* unset dev capabilities */
2389 memset(dev_caps, 0, sizeof(*dev_caps));
2391 #define ICE_RESTORE_DEV_CAP(name) \
2392 dev_caps->common_cap.name = cached_caps.name
2394 /* restore cached values */
2395 ICE_RESTORE_DEV_CAP(valid_functions);
2396 ICE_RESTORE_DEV_CAP(txq_first_id);
2397 ICE_RESTORE_DEV_CAP(rxq_first_id);
2398 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2399 ICE_RESTORE_DEV_CAP(max_mtu);
2400 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2401 dev_caps->num_funcs = num_funcs;
2403 /* one Tx and one Rx queue per function in safe mode */
2404 dev_caps->common_cap.num_rxq = num_funcs;
2405 dev_caps->common_cap.num_txq = num_funcs;
2407 /* two MSIX vectors per function */
2408 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2412 * ice_get_caps - get info about the HW
2413 * @hw: pointer to the hardware structure
2415 enum ice_status ice_get_caps(struct ice_hw *hw)
2417 enum ice_status status;
2419 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2423 return ice_discover_func_caps(hw, &hw->func_caps);
2427 * ice_aq_manage_mac_write - manage MAC address write command
2428 * @hw: pointer to the HW struct
2429 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2430 * @flags: flags to control write behavior
2431 * @cd: pointer to command details structure or NULL
2433 * This function is used to write MAC address to the NVM (0x0108).
2436 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2437 struct ice_sq_cd *cd)
2439 struct ice_aqc_manage_mac_write *cmd;
2440 struct ice_aq_desc desc;
2442 cmd = &desc.params.mac_write;
2443 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2446 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
2448 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2452 * ice_aq_clear_pxe_mode
2453 * @hw: pointer to the HW struct
2455 * Tell the firmware that the driver is taking over from PXE (0x0110).
2457 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2459 struct ice_aq_desc desc;
2461 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2462 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2464 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2468 * ice_clear_pxe_mode - clear pxe operations mode
2469 * @hw: pointer to the HW struct
2471 * Make sure all PXE mode settings are cleared, including things
2472 * like descriptor fetch/write-back mode.
2474 void ice_clear_pxe_mode(struct ice_hw *hw)
2476 if (ice_check_sq_alive(hw, &hw->adminq))
2477 ice_aq_clear_pxe_mode(hw);
2481 * ice_aq_set_port_params - set physical port parameters.
2482 * @pi: pointer to the port info struct
2483 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2484 * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI
2485 * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded
2486 * @double_vlan: if set double VLAN is enabled
2487 * @cd: pointer to command details structure or NULL
2489 * Set Physical port parameters (0x0203)
2492 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2493 bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2494 struct ice_sq_cd *cd)
2497 struct ice_aqc_set_port_params *cmd;
2498 struct ice_hw *hw = pi->hw;
2499 struct ice_aq_desc desc;
2502 cmd = &desc.params.set_port_params;
2504 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2505 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
2507 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
2509 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
2511 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2512 cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
2514 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2518 * ice_get_link_speed_based_on_phy_type - returns link speed
2519 * @phy_type_low: lower part of phy_type
2520 * @phy_type_high: higher part of phy_type
2522 * This helper function will convert an entry in PHY type structure
2523 * [phy_type_low, phy_type_high] to its corresponding link speed.
2524 * Note: In the structure of [phy_type_low, phy_type_high], there should
2525 * be one bit set, as this function will convert one PHY type to its
2527 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2528 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2531 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2533 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2534 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2536 switch (phy_type_low) {
2537 case ICE_PHY_TYPE_LOW_100BASE_TX:
2538 case ICE_PHY_TYPE_LOW_100M_SGMII:
2539 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2541 case ICE_PHY_TYPE_LOW_1000BASE_T:
2542 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2543 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2544 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2545 case ICE_PHY_TYPE_LOW_1G_SGMII:
2546 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2548 case ICE_PHY_TYPE_LOW_2500BASE_T:
2549 case ICE_PHY_TYPE_LOW_2500BASE_X:
2550 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2551 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2553 case ICE_PHY_TYPE_LOW_5GBASE_T:
2554 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2555 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2557 case ICE_PHY_TYPE_LOW_10GBASE_T:
2558 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2559 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2560 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2561 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2562 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2563 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2564 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2566 case ICE_PHY_TYPE_LOW_25GBASE_T:
2567 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2568 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2569 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2570 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2571 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2572 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2573 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2574 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2575 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2576 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2577 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2579 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2580 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2581 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2582 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2583 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2584 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2585 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2587 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2588 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2589 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2590 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2591 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2592 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2593 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2594 case ICE_PHY_TYPE_LOW_50G_AUI2:
2595 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2596 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2597 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2598 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2599 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2600 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2601 case ICE_PHY_TYPE_LOW_50G_AUI1:
2602 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2604 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2605 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2606 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2607 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2608 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2609 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2610 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2611 case ICE_PHY_TYPE_LOW_100G_AUI4:
2612 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2613 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2614 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2615 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2616 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2617 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2620 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2624 switch (phy_type_high) {
2625 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2626 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2627 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2628 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2629 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2630 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2633 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2637 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2638 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2639 return ICE_AQ_LINK_SPEED_UNKNOWN;
2640 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2641 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2642 return ICE_AQ_LINK_SPEED_UNKNOWN;
2643 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2644 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2645 return speed_phy_type_low;
2647 return speed_phy_type_high;
2651 * ice_update_phy_type
2652 * @phy_type_low: pointer to the lower part of phy_type
2653 * @phy_type_high: pointer to the higher part of phy_type
2654 * @link_speeds_bitmap: targeted link speeds bitmap
2656 * Note: For the link_speeds_bitmap structure, you can check it at
2657 * [ice_aqc_get_link_status->link_speed]. Caller can pass in
2658 * link_speeds_bitmap include multiple speeds.
2660 * Each entry in this [phy_type_low, phy_type_high] structure will
2661 * present a certain link speed. This helper function will turn on bits
2662 * in [phy_type_low, phy_type_high] structure based on the value of
2663 * link_speeds_bitmap input parameter.
2666 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2667 u16 link_speeds_bitmap)
2674 /* We first check with low part of phy_type */
2675 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2676 pt_low = BIT_ULL(index);
2677 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2679 if (link_speeds_bitmap & speed)
2680 *phy_type_low |= BIT_ULL(index);
2683 /* We then check with high part of phy_type */
2684 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2685 pt_high = BIT_ULL(index);
2686 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2688 if (link_speeds_bitmap & speed)
2689 *phy_type_high |= BIT_ULL(index);
2694 * ice_aq_set_phy_cfg
2695 * @hw: pointer to the HW struct
2696 * @pi: port info structure of the interested logical port
2697 * @cfg: structure with PHY configuration data to be set
2698 * @cd: pointer to command details structure or NULL
2700 * Set the various PHY configuration parameters supported on the Port.
2701 * One or more of the Set PHY config parameters may be ignored in an MFP
2702 * mode as the PF may not have the privilege to set some of the PHY Config
2703 * parameters. This status will be indicated by the command response (0x0601).
2706 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2707 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2709 struct ice_aq_desc desc;
2710 enum ice_status status;
2713 return ICE_ERR_PARAM;
2715 /* Ensure that only valid bits of cfg->caps can be turned on. */
2716 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2717 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2720 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2723 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2724 desc.params.set_phy.lport_num = pi->lport;
2725 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2727 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2728 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2729 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2730 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2731 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2732 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2733 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2734 cfg->low_power_ctrl_an);
2735 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2736 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2737 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2740 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2742 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2743 status = ICE_SUCCESS;
2746 pi->phy.curr_user_phy_cfg = *cfg;
2752 * ice_update_link_info - update status of the HW network link
2753 * @pi: port info structure of the interested logical port
2755 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2757 struct ice_link_status *li;
2758 enum ice_status status;
2761 return ICE_ERR_PARAM;
2763 li = &pi->phy.link_info;
2765 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2769 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2770 struct ice_aqc_get_phy_caps_data *pcaps;
2774 pcaps = (struct ice_aqc_get_phy_caps_data *)
2775 ice_malloc(hw, sizeof(*pcaps));
2777 return ICE_ERR_NO_MEMORY;
2779 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2782 if (status == ICE_SUCCESS)
2783 ice_memcpy(li->module_type, &pcaps->module_type,
2784 sizeof(li->module_type),
2785 ICE_NONDMA_TO_NONDMA);
2787 ice_free(hw, pcaps);
2794 * ice_cache_phy_user_req
2795 * @pi: port information structure
2796 * @cache_data: PHY logging data
2797 * @cache_mode: PHY logging mode
2799 * Log the user request on (FC, FEC, SPEED) for later user.
2802 ice_cache_phy_user_req(struct ice_port_info *pi,
2803 struct ice_phy_cache_mode_data cache_data,
2804 enum ice_phy_cache_mode cache_mode)
2809 switch (cache_mode) {
2811 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2813 case ICE_SPEED_MODE:
2814 pi->phy.curr_user_speed_req =
2815 cache_data.data.curr_user_speed_req;
2818 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2826 * ice_caps_to_fc_mode
2827 * @caps: PHY capabilities
2829 * Convert PHY FC capabilities to ice FC mode
2831 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2833 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2834 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2837 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2838 return ICE_FC_TX_PAUSE;
2840 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2841 return ICE_FC_RX_PAUSE;
2847 * ice_caps_to_fec_mode
2848 * @caps: PHY capabilities
2849 * @fec_options: Link FEC options
2851 * Convert PHY FEC capabilities to ice FEC mode
2853 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2855 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2856 return ICE_FEC_AUTO;
2858 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2859 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2860 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2861 ICE_AQC_PHY_FEC_25G_KR_REQ))
2862 return ICE_FEC_BASER;
2864 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2865 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2866 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2869 return ICE_FEC_NONE;
2873 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2874 * @pi: port information structure
2875 * @cfg: PHY configuration data to set FC mode
2876 * @req_mode: FC mode to configure
2878 static enum ice_status
2879 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2880 enum ice_fc_mode req_mode)
2882 struct ice_phy_cache_mode_data cache_data;
2883 u8 pause_mask = 0x0;
2886 return ICE_ERR_BAD_PTR;
2891 struct ice_aqc_get_phy_caps_data *pcaps;
2892 enum ice_status status;
2894 pcaps = (struct ice_aqc_get_phy_caps_data *)
2895 ice_malloc(pi->hw, sizeof(*pcaps));
2897 return ICE_ERR_NO_MEMORY;
2899 /* Query the value of FC that both the NIC and attached media
2902 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2905 ice_free(pi->hw, pcaps);
2909 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2910 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2912 ice_free(pi->hw, pcaps);
2916 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2917 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2919 case ICE_FC_RX_PAUSE:
2920 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2922 case ICE_FC_TX_PAUSE:
2923 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2929 /* clear the old pause settings */
2930 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2931 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2933 /* set the new capabilities */
2934 cfg->caps |= pause_mask;
2936 /* Cache user FC request */
2937 cache_data.data.curr_user_fc_req = req_mode;
2938 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2945 * @pi: port information structure
2946 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2947 * @ena_auto_link_update: enable automatic link update
2949 * Set the requested flow control mode.
2952 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2954 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2955 struct ice_aqc_get_phy_caps_data *pcaps;
2956 enum ice_status status;
2959 if (!pi || !aq_failures)
2960 return ICE_ERR_BAD_PTR;
2965 pcaps = (struct ice_aqc_get_phy_caps_data *)
2966 ice_malloc(hw, sizeof(*pcaps));
2968 return ICE_ERR_NO_MEMORY;
2970 /* Get the current PHY config */
2971 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
2975 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2979 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2981 /* Configure the set PHY data */
2982 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2984 if (status != ICE_ERR_BAD_PTR)
2985 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2990 /* If the capabilities have changed, then set the new config */
2991 if (cfg.caps != pcaps->caps) {
2992 int retry_count, retry_max = 10;
2994 /* Auto restart link so settings take effect */
2995 if (ena_auto_link_update)
2996 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2998 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3000 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3004 /* Update the link info
3005 * It sometimes takes a really long time for link to
3006 * come back from the atomic reset. Thus, we wait a
3009 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3010 status = ice_update_link_info(pi);
3012 if (status == ICE_SUCCESS)
3015 ice_msec_delay(100, true);
3019 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3023 ice_free(hw, pcaps);
3028 * ice_phy_caps_equals_cfg
3029 * @phy_caps: PHY capabilities
3030 * @phy_cfg: PHY configuration
3032 * Helper function to determine if PHY capabilities matches PHY
3036 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3037 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3039 u8 caps_mask, cfg_mask;
3041 if (!phy_caps || !phy_cfg)
3044 /* These bits are not common between capabilities and configuration.
3045 * Do not use them to determine equality.
3047 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3048 ICE_AQC_PHY_EN_MOD_QUAL);
3049 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3051 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3052 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3053 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3054 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3055 phy_caps->eee_cap != phy_cfg->eee_cap ||
3056 phy_caps->eeer_value != phy_cfg->eeer_value ||
3057 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3064 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3065 * @pi: port information structure
3066 * @caps: PHY ability structure to copy date from
3067 * @cfg: PHY configuration structure to copy data to
3069 * Helper function to copy AQC PHY get ability data to PHY set configuration
3073 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3074 struct ice_aqc_get_phy_caps_data *caps,
3075 struct ice_aqc_set_phy_cfg_data *cfg)
3077 if (!pi || !caps || !cfg)
3080 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3081 cfg->phy_type_low = caps->phy_type_low;
3082 cfg->phy_type_high = caps->phy_type_high;
3083 cfg->caps = caps->caps;
3084 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3085 cfg->eee_cap = caps->eee_cap;
3086 cfg->eeer_value = caps->eeer_value;
3087 cfg->link_fec_opt = caps->link_fec_options;
3088 cfg->module_compliance_enforcement =
3089 caps->module_compliance_enforcement;
3093 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3094 * @pi: port information structure
3095 * @cfg: PHY configuration data to set FEC mode
3096 * @fec: FEC mode to configure
3099 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3100 enum ice_fec_mode fec)
3102 struct ice_aqc_get_phy_caps_data *pcaps;
3103 enum ice_status status = ICE_SUCCESS;
3107 return ICE_ERR_BAD_PTR;
3111 pcaps = (struct ice_aqc_get_phy_caps_data *)
3112 ice_malloc(hw, sizeof(*pcaps));
3114 return ICE_ERR_NO_MEMORY;
3116 status = ice_aq_get_phy_caps(pi, false,
3117 (ice_fw_supports_report_dflt_cfg(hw) ?
3118 ICE_AQC_REPORT_DFLT_CFG :
3119 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3124 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3125 cfg->link_fec_opt = pcaps->link_fec_options;
3129 /* Clear RS bits, and AND BASE-R ability
3130 * bits and OR request bits.
3132 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3133 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3134 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3135 ICE_AQC_PHY_FEC_25G_KR_REQ;
3138 /* Clear BASE-R bits, and AND RS ability
3139 * bits and OR request bits.
3141 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3142 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3143 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3146 /* Clear all FEC option bits. */
3147 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3150 /* AND auto FEC bit, and all caps bits. */
3151 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3152 cfg->link_fec_opt |= pcaps->link_fec_options;
3155 status = ICE_ERR_PARAM;
3159 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
3160 !ice_fw_supports_report_dflt_cfg(pi->hw)) {
3161 struct ice_link_default_override_tlv tlv;
3163 if (ice_get_link_default_override(&tlv, pi))
3166 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3167 (tlv.options & ICE_LINK_OVERRIDE_EN))
3168 cfg->link_fec_opt = tlv.fec_options;
3172 ice_free(hw, pcaps);
3178 * ice_get_link_status - get status of the HW network link
3179 * @pi: port information structure
3180 * @link_up: pointer to bool (true/false = linkup/linkdown)
3182 * Variable link_up is true if link is up, false if link is down.
3183 * The variable link_up is invalid if status is non zero. As a
3184 * result of this call, link status reporting becomes enabled
3186 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3188 struct ice_phy_info *phy_info;
3189 enum ice_status status = ICE_SUCCESS;
3191 if (!pi || !link_up)
3192 return ICE_ERR_PARAM;
3194 phy_info = &pi->phy;
3196 if (phy_info->get_link_info) {
3197 status = ice_update_link_info(pi);
3200 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3204 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3210 * ice_aq_set_link_restart_an
3211 * @pi: pointer to the port information structure
3212 * @ena_link: if true: enable link, if false: disable link
3213 * @cd: pointer to command details structure or NULL
3215 * Sets up the link and restarts the Auto-Negotiation over the link.
3218 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3219 struct ice_sq_cd *cd)
3221 struct ice_aqc_restart_an *cmd;
3222 struct ice_aq_desc desc;
3224 cmd = &desc.params.restart_an;
3226 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3228 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3229 cmd->lport_num = pi->lport;
3231 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3233 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3235 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3239 * ice_aq_set_event_mask
3240 * @hw: pointer to the HW struct
3241 * @port_num: port number of the physical function
3242 * @mask: event mask to be set
3243 * @cd: pointer to command details structure or NULL
3245 * Set event mask (0x0613)
3248 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3249 struct ice_sq_cd *cd)
3251 struct ice_aqc_set_event_mask *cmd;
3252 struct ice_aq_desc desc;
3254 cmd = &desc.params.set_event_mask;
3256 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3258 cmd->lport_num = port_num;
3260 cmd->event_mask = CPU_TO_LE16(mask);
3261 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3265 * ice_aq_set_mac_loopback
3266 * @hw: pointer to the HW struct
3267 * @ena_lpbk: Enable or Disable loopback
3268 * @cd: pointer to command details structure or NULL
3270 * Enable/disable loopback on a given port
3273 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3275 struct ice_aqc_set_mac_lb *cmd;
3276 struct ice_aq_desc desc;
3278 cmd = &desc.params.set_mac_lb;
3280 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3282 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3284 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3288 * ice_aq_set_port_id_led
3289 * @pi: pointer to the port information
3290 * @is_orig_mode: is this LED set to original mode (by the net-list)
3291 * @cd: pointer to command details structure or NULL
3293 * Set LED value for the given port (0x06e9)
3296 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3297 struct ice_sq_cd *cd)
3299 struct ice_aqc_set_port_id_led *cmd;
3300 struct ice_hw *hw = pi->hw;
3301 struct ice_aq_desc desc;
3303 cmd = &desc.params.set_port_id_led;
3305 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3308 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3310 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3312 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3317 * @hw: pointer to the HW struct
3318 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3319 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3320 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
3322 * @set_page: set or ignore the page
3323 * @data: pointer to data buffer to be read/written to the I2C device.
3324 * @length: 1-16 for read, 1 for write.
3325 * @write: 0 read, 1 for write.
3326 * @cd: pointer to command details structure or NULL
3328 * Read/Write SFF EEPROM (0x06EE)
3331 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3332 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3333 bool write, struct ice_sq_cd *cd)
3335 struct ice_aqc_sff_eeprom *cmd;
3336 struct ice_aq_desc desc;
3337 enum ice_status status;
3339 if (!data || (mem_addr & 0xff00))
3340 return ICE_ERR_PARAM;
3342 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3343 cmd = &desc.params.read_write_sff_param;
3344 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3345 cmd->lport_num = (u8)(lport & 0xff);
3346 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3347 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3348 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3350 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3351 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3352 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3353 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3355 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3357 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3362 * __ice_aq_get_set_rss_lut
3363 * @hw: pointer to the hardware structure
3364 * @params: RSS LUT parameters
3365 * @set: set true to set the table, false to get the table
3367 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3369 static enum ice_status
3370 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3372 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3373 struct ice_aqc_get_set_rss_lut *cmd_resp;
3374 struct ice_aq_desc desc;
3375 enum ice_status status;
3379 return ICE_ERR_PARAM;
3381 vsi_handle = params->vsi_handle;
3384 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3385 return ICE_ERR_PARAM;
3387 lut_size = params->lut_size;
3388 lut_type = params->lut_type;
3389 glob_lut_idx = params->global_lut_id;
3390 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3392 cmd_resp = &desc.params.get_set_rss_lut;
3395 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3396 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3398 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3401 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3402 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3403 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3404 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3407 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3408 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3409 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3410 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3411 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3414 status = ICE_ERR_PARAM;
3415 goto ice_aq_get_set_rss_lut_exit;
3418 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3419 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3420 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3423 goto ice_aq_get_set_rss_lut_send;
3424 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3426 goto ice_aq_get_set_rss_lut_send;
3428 goto ice_aq_get_set_rss_lut_send;
3431 /* LUT size is only valid for Global and PF table types */
3433 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3434 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3435 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3436 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3438 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3439 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3440 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3441 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3443 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3444 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3445 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3446 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3447 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3452 status = ICE_ERR_PARAM;
3453 goto ice_aq_get_set_rss_lut_exit;
3456 ice_aq_get_set_rss_lut_send:
3457 cmd_resp->flags = CPU_TO_LE16(flags);
3458 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3460 ice_aq_get_set_rss_lut_exit:
3465 * ice_aq_get_rss_lut
3466 * @hw: pointer to the hardware structure
3467 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3469 * get the RSS lookup table, PF or VSI type
3472 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3474 return __ice_aq_get_set_rss_lut(hw, get_params, false);
3478 * ice_aq_set_rss_lut
3479 * @hw: pointer to the hardware structure
3480 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3482 * set the RSS lookup table, PF or VSI type
3485 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3487 return __ice_aq_get_set_rss_lut(hw, set_params, true);
3491 * __ice_aq_get_set_rss_key
3492 * @hw: pointer to the HW struct
3493 * @vsi_id: VSI FW index
3494 * @key: pointer to key info struct
3495 * @set: set true to set the key, false to get the key
3497 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3500 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3501 struct ice_aqc_get_set_rss_keys *key,
3504 struct ice_aqc_get_set_rss_key *cmd_resp;
3505 u16 key_size = sizeof(*key);
3506 struct ice_aq_desc desc;
3508 cmd_resp = &desc.params.get_set_rss_key;
3511 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3512 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3514 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3517 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3518 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3519 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3520 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3522 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3526 * ice_aq_get_rss_key
3527 * @hw: pointer to the HW struct
3528 * @vsi_handle: software VSI handle
3529 * @key: pointer to key info struct
3531 * get the RSS key per VSI
3534 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3535 struct ice_aqc_get_set_rss_keys *key)
3537 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3538 return ICE_ERR_PARAM;
3540 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3545 * ice_aq_set_rss_key
3546 * @hw: pointer to the HW struct
3547 * @vsi_handle: software VSI handle
3548 * @keys: pointer to key info struct
3550 * set the RSS key per VSI
3553 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3554 struct ice_aqc_get_set_rss_keys *keys)
3556 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3557 return ICE_ERR_PARAM;
3559 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3564 * ice_aq_add_lan_txq
3565 * @hw: pointer to the hardware structure
3566 * @num_qgrps: Number of added queue groups
3567 * @qg_list: list of queue groups to be added
3568 * @buf_size: size of buffer for indirect command
3569 * @cd: pointer to command details structure or NULL
3571 * Add Tx LAN queue (0x0C30)
3574 * Prior to calling add Tx LAN queue:
3575 * Initialize the following as part of the Tx queue context:
3576 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3577 * Cache profile and Packet shaper profile.
3579 * After add Tx LAN queue AQ command is completed:
3580 * Interrupts should be associated with specific queues,
3581 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
3585 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3586 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3587 struct ice_sq_cd *cd)
3589 struct ice_aqc_add_tx_qgrp *list;
3590 struct ice_aqc_add_txqs *cmd;
3591 struct ice_aq_desc desc;
3592 u16 i, sum_size = 0;
3594 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3596 cmd = &desc.params.add_txqs;
3598 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3601 return ICE_ERR_PARAM;
3603 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3604 return ICE_ERR_PARAM;
3606 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3607 sum_size += ice_struct_size(list, txqs, list->num_txqs);
3608 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3612 if (buf_size != sum_size)
3613 return ICE_ERR_PARAM;
3615 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3617 cmd->num_qgrps = num_qgrps;
3619 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3623 * ice_aq_dis_lan_txq
3624 * @hw: pointer to the hardware structure
3625 * @num_qgrps: number of groups in the list
3626 * @qg_list: the list of groups to disable
3627 * @buf_size: the total size of the qg_list buffer in bytes
3628 * @rst_src: if called due to reset, specifies the reset source
3629 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3630 * @cd: pointer to command details structure or NULL
3632 * Disable LAN Tx queue (0x0C31)
3634 static enum ice_status
3635 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3636 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3637 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3638 struct ice_sq_cd *cd)
3640 struct ice_aqc_dis_txq_item *item;
3641 struct ice_aqc_dis_txqs *cmd;
3642 struct ice_aq_desc desc;
3643 enum ice_status status;
3646 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3647 cmd = &desc.params.dis_txqs;
3648 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3650 /* qg_list can be NULL only in VM/VF reset flow */
3651 if (!qg_list && !rst_src)
3652 return ICE_ERR_PARAM;
3654 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3655 return ICE_ERR_PARAM;
3657 cmd->num_entries = num_qgrps;
3659 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3660 ICE_AQC_Q_DIS_TIMEOUT_M);
3664 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3665 cmd->vmvf_and_timeout |=
3666 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3673 /* flush pipe on time out */
3674 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3675 /* If no queue group info, we are in a reset flow. Issue the AQ */
3679 /* set RD bit to indicate that command buffer is provided by the driver
3680 * and it needs to be read by the firmware
3682 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3684 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3685 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
3687 /* If the num of queues is even, add 2 bytes of padding */
3688 if ((item->num_qs % 2) == 0)
3693 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3697 return ICE_ERR_PARAM;
3700 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3703 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3704 vmvf_num, hw->adminq.sq_last_status);
3706 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3707 LE16_TO_CPU(qg_list[0].q_id[0]),
3708 hw->adminq.sq_last_status);
3714 * ice_aq_move_recfg_lan_txq
3715 * @hw: pointer to the hardware structure
3716 * @num_qs: number of queues to move/reconfigure
3717 * @is_move: true if this operation involves node movement
3718 * @is_tc_change: true if this operation involves a TC change
3719 * @subseq_call: true if this operation is a subsequent call
3720 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3721 * @timeout: timeout in units of 100 usec (valid values 0-50)
3722 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3723 * @buf: struct containing src/dest TEID and per-queue info
3724 * @buf_size: size of buffer for indirect command
3725 * @txqs_moved: out param, number of queues successfully moved
3726 * @cd: pointer to command details structure or NULL
3728 * Move / Reconfigure Tx LAN queues (0x0C32)
3731 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3732 bool is_tc_change, bool subseq_call, bool flush_pipe,
3733 u8 timeout, u32 *blocked_cgds,
3734 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3735 u8 *txqs_moved, struct ice_sq_cd *cd)
3737 struct ice_aqc_move_txqs *cmd;
3738 struct ice_aq_desc desc;
3739 enum ice_status status;
3741 cmd = &desc.params.move_txqs;
3742 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3744 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3745 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3746 return ICE_ERR_PARAM;
3748 if (is_tc_change && !flush_pipe && !blocked_cgds)
3749 return ICE_ERR_PARAM;
3751 if (!is_move && !is_tc_change)
3752 return ICE_ERR_PARAM;
3754 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3757 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3760 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3763 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3766 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3768 cmd->num_qs = num_qs;
3769 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3770 ICE_AQC_Q_CMD_TIMEOUT_M);
3772 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3774 if (!status && txqs_moved)
3775 *txqs_moved = cmd->num_qs;
3777 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3778 is_tc_change && !flush_pipe)
3779 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3784 /* End of FW Admin Queue command wrappers */
3787 * ice_write_byte - write a byte to a packed context structure
3788 * @src_ctx: the context structure to read from
3789 * @dest_ctx: the context to be written to
3790 * @ce_info: a description of the struct to be filled
3793 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3795 u8 src_byte, dest_byte, mask;
3799 /* copy from the next struct field */
3800 from = src_ctx + ce_info->offset;
3802 /* prepare the bits and mask */
3803 shift_width = ce_info->lsb % 8;
3804 mask = (u8)(BIT(ce_info->width) - 1);
3809 /* shift to correct alignment */
3810 mask <<= shift_width;
3811 src_byte <<= shift_width;
3813 /* get the current bits from the target bit string */
3814 dest = dest_ctx + (ce_info->lsb / 8);
3816 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3818 dest_byte &= ~mask; /* get the bits not changing */
3819 dest_byte |= src_byte; /* add in the new bits */
3821 /* put it all back */
3822 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3826 * ice_write_word - write a word to a packed context structure
3827 * @src_ctx: the context structure to read from
3828 * @dest_ctx: the context to be written to
3829 * @ce_info: a description of the struct to be filled
3832 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3839 /* copy from the next struct field */
3840 from = src_ctx + ce_info->offset;
3842 /* prepare the bits and mask */
3843 shift_width = ce_info->lsb % 8;
3844 mask = BIT(ce_info->width) - 1;
3846 /* don't swizzle the bits until after the mask because the mask bits
3847 * will be in a different bit position on big endian machines
3849 src_word = *(u16 *)from;
3852 /* shift to correct alignment */
3853 mask <<= shift_width;
3854 src_word <<= shift_width;
3856 /* get the current bits from the target bit string */
3857 dest = dest_ctx + (ce_info->lsb / 8);
3859 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3861 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
3862 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
3864 /* put it all back */
3865 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3869 * ice_write_dword - write a dword to a packed context structure
3870 * @src_ctx: the context structure to read from
3871 * @dest_ctx: the context to be written to
3872 * @ce_info: a description of the struct to be filled
3875 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3877 u32 src_dword, mask;
3882 /* copy from the next struct field */
3883 from = src_ctx + ce_info->offset;
3885 /* prepare the bits and mask */
3886 shift_width = ce_info->lsb % 8;
3888 /* if the field width is exactly 32 on an x86 machine, then the shift
3889 * operation will not work because the SHL instructions count is masked
3890 * to 5 bits so the shift will do nothing
3892 if (ce_info->width < 32)
3893 mask = BIT(ce_info->width) - 1;
3897 /* don't swizzle the bits until after the mask because the mask bits
3898 * will be in a different bit position on big endian machines
3900 src_dword = *(u32 *)from;
3903 /* shift to correct alignment */
3904 mask <<= shift_width;
3905 src_dword <<= shift_width;
3907 /* get the current bits from the target bit string */
3908 dest = dest_ctx + (ce_info->lsb / 8);
3910 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3912 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
3913 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
3915 /* put it all back */
3916 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3920 * ice_write_qword - write a qword to a packed context structure
3921 * @src_ctx: the context structure to read from
3922 * @dest_ctx: the context to be written to
3923 * @ce_info: a description of the struct to be filled
3926 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3928 u64 src_qword, mask;
3933 /* copy from the next struct field */
3934 from = src_ctx + ce_info->offset;
3936 /* prepare the bits and mask */
3937 shift_width = ce_info->lsb % 8;
3939 /* if the field width is exactly 64 on an x86 machine, then the shift
3940 * operation will not work because the SHL instructions count is masked
3941 * to 6 bits so the shift will do nothing
3943 if (ce_info->width < 64)
3944 mask = BIT_ULL(ce_info->width) - 1;
3948 /* don't swizzle the bits until after the mask because the mask bits
3949 * will be in a different bit position on big endian machines
3951 src_qword = *(u64 *)from;
3954 /* shift to correct alignment */
3955 mask <<= shift_width;
3956 src_qword <<= shift_width;
3958 /* get the current bits from the target bit string */
3959 dest = dest_ctx + (ce_info->lsb / 8);
3961 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
3963 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
3964 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
3966 /* put it all back */
3967 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3971 * ice_set_ctx - set context bits in packed structure
3972 * @hw: pointer to the hardware structure
3973 * @src_ctx: pointer to a generic non-packed context structure
3974 * @dest_ctx: pointer to memory for the packed structure
3975 * @ce_info: a description of the structure to be transformed
3978 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
3979 const struct ice_ctx_ele *ce_info)
3983 for (f = 0; ce_info[f].width; f++) {
3984 /* We have to deal with each element of the FW response
3985 * using the correct size so that we are correct regardless
3986 * of the endianness of the machine.
3988 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
3989 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
3990 f, ce_info[f].width, ce_info[f].size_of);
3993 switch (ce_info[f].size_of) {
3995 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3998 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4001 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4004 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4007 return ICE_ERR_INVAL_SIZE;
4015 * ice_read_byte - read context byte into struct
4016 * @src_ctx: the context structure to read from
4017 * @dest_ctx: the context to be written to
4018 * @ce_info: a description of the struct to be filled
4021 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4027 /* prepare the bits and mask */
4028 shift_width = ce_info->lsb % 8;
4029 mask = (u8)(BIT(ce_info->width) - 1);
4031 /* shift to correct alignment */
4032 mask <<= shift_width;
4034 /* get the current bits from the src bit string */
4035 src = src_ctx + (ce_info->lsb / 8);
4037 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4039 dest_byte &= ~(mask);
4041 dest_byte >>= shift_width;
4043 /* get the address from the struct field */
4044 target = dest_ctx + ce_info->offset;
4046 /* put it back in the struct */
4047 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4051 * ice_read_word - read context word into struct
4052 * @src_ctx: the context structure to read from
4053 * @dest_ctx: the context to be written to
4054 * @ce_info: a description of the struct to be filled
4057 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4059 u16 dest_word, mask;
4064 /* prepare the bits and mask */
4065 shift_width = ce_info->lsb % 8;
4066 mask = BIT(ce_info->width) - 1;
4068 /* shift to correct alignment */
4069 mask <<= shift_width;
4071 /* get the current bits from the src bit string */
4072 src = src_ctx + (ce_info->lsb / 8);
4074 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4076 /* the data in the memory is stored as little endian so mask it
4079 src_word &= ~(CPU_TO_LE16(mask));
4081 /* get the data back into host order before shifting */
4082 dest_word = LE16_TO_CPU(src_word);
4084 dest_word >>= shift_width;
4086 /* get the address from the struct field */
4087 target = dest_ctx + ce_info->offset;
4089 /* put it back in the struct */
4090 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4094 * ice_read_dword - read context dword into struct
4095 * @src_ctx: the context structure to read from
4096 * @dest_ctx: the context to be written to
4097 * @ce_info: a description of the struct to be filled
4100 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4102 u32 dest_dword, mask;
4107 /* prepare the bits and mask */
4108 shift_width = ce_info->lsb % 8;
4110 /* if the field width is exactly 32 on an x86 machine, then the shift
4111 * operation will not work because the SHL instructions count is masked
4112 * to 5 bits so the shift will do nothing
4114 if (ce_info->width < 32)
4115 mask = BIT(ce_info->width) - 1;
4119 /* shift to correct alignment */
4120 mask <<= shift_width;
4122 /* get the current bits from the src bit string */
4123 src = src_ctx + (ce_info->lsb / 8);
4125 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4127 /* the data in the memory is stored as little endian so mask it
4130 src_dword &= ~(CPU_TO_LE32(mask));
4132 /* get the data back into host order before shifting */
4133 dest_dword = LE32_TO_CPU(src_dword);
4135 dest_dword >>= shift_width;
4137 /* get the address from the struct field */
4138 target = dest_ctx + ce_info->offset;
4140 /* put it back in the struct */
4141 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4145 * ice_read_qword - read context qword into struct
4146 * @src_ctx: the context structure to read from
4147 * @dest_ctx: the context to be written to
4148 * @ce_info: a description of the struct to be filled
4151 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4153 u64 dest_qword, mask;
4158 /* prepare the bits and mask */
4159 shift_width = ce_info->lsb % 8;
4161 /* if the field width is exactly 64 on an x86 machine, then the shift
4162 * operation will not work because the SHL instructions count is masked
4163 * to 6 bits so the shift will do nothing
4165 if (ce_info->width < 64)
4166 mask = BIT_ULL(ce_info->width) - 1;
4170 /* shift to correct alignment */
4171 mask <<= shift_width;
4173 /* get the current bits from the src bit string */
4174 src = src_ctx + (ce_info->lsb / 8);
4176 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4178 /* the data in the memory is stored as little endian so mask it
4181 src_qword &= ~(CPU_TO_LE64(mask));
4183 /* get the data back into host order before shifting */
4184 dest_qword = LE64_TO_CPU(src_qword);
4186 dest_qword >>= shift_width;
4188 /* get the address from the struct field */
4189 target = dest_ctx + ce_info->offset;
4191 /* put it back in the struct */
4192 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4196 * ice_get_ctx - extract context bits from a packed structure
4197 * @src_ctx: pointer to a generic packed context structure
4198 * @dest_ctx: pointer to a generic non-packed context structure
4199 * @ce_info: a description of the structure to be read from
4202 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4206 for (f = 0; ce_info[f].width; f++) {
4207 switch (ce_info[f].size_of) {
4209 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
4212 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
4215 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
4218 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
4221 /* nothing to do, just keep going */
4230 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4231 * @hw: pointer to the HW struct
4232 * @vsi_handle: software VSI handle
4234 * @q_handle: software queue handle
4237 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4239 struct ice_vsi_ctx *vsi;
4240 struct ice_q_ctx *q_ctx;
4242 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4245 if (q_handle >= vsi->num_lan_q_entries[tc])
4247 if (!vsi->lan_q_ctx[tc])
4249 q_ctx = vsi->lan_q_ctx[tc];
4250 return &q_ctx[q_handle];
4255 * @pi: port information structure
4256 * @vsi_handle: software VSI handle
4258 * @q_handle: software queue handle
4259 * @num_qgrps: Number of added queue groups
4260 * @buf: list of queue groups to be added
4261 * @buf_size: size of buffer for indirect command
4262 * @cd: pointer to command details structure or NULL
4264 * This function adds one LAN queue
4267 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4268 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4269 struct ice_sq_cd *cd)
4271 struct ice_aqc_txsched_elem_data node = { 0 };
4272 struct ice_sched_node *parent;
4273 struct ice_q_ctx *q_ctx;
4274 enum ice_status status;
4277 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4280 if (num_qgrps > 1 || buf->num_txqs > 1)
4281 return ICE_ERR_MAX_LIMIT;
4285 if (!ice_is_vsi_valid(hw, vsi_handle))
4286 return ICE_ERR_PARAM;
4288 ice_acquire_lock(&pi->sched_lock);
4290 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4292 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4294 status = ICE_ERR_PARAM;
4298 /* find a parent node */
4299 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4300 ICE_SCHED_NODE_OWNER_LAN);
4302 status = ICE_ERR_PARAM;
4306 buf->parent_teid = parent->info.node_teid;
4307 node.parent_teid = parent->info.node_teid;
4308 /* Mark that the values in the "generic" section as valid. The default
4309 * value in the "generic" section is zero. This means that :
4310 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4311 * - 0 priority among siblings, indicated by Bit 1-3.
4312 * - WFQ, indicated by Bit 4.
4313 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4315 * - Bit 7 is reserved.
4316 * Without setting the generic section as valid in valid_sections, the
4317 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4319 buf->txqs[0].info.valid_sections =
4320 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4321 ICE_AQC_ELEM_VALID_EIR;
4322 buf->txqs[0].info.generic = 0;
4323 buf->txqs[0].info.cir_bw.bw_profile_idx =
4324 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4325 buf->txqs[0].info.cir_bw.bw_alloc =
4326 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4327 buf->txqs[0].info.eir_bw.bw_profile_idx =
4328 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4329 buf->txqs[0].info.eir_bw.bw_alloc =
4330 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4332 /* add the LAN queue */
4333 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4334 if (status != ICE_SUCCESS) {
4335 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4336 LE16_TO_CPU(buf->txqs[0].txq_id),
4337 hw->adminq.sq_last_status);
4341 node.node_teid = buf->txqs[0].q_teid;
4342 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4343 q_ctx->q_handle = q_handle;
4344 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4346 /* add a leaf node into scheduler tree queue layer */
4347 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4349 status = ice_sched_replay_q_bw(pi, q_ctx);
4352 ice_release_lock(&pi->sched_lock);
4358 * @pi: port information structure
4359 * @vsi_handle: software VSI handle
4361 * @num_queues: number of queues
4362 * @q_handles: pointer to software queue handle array
4363 * @q_ids: pointer to the q_id array
4364 * @q_teids: pointer to queue node teids
4365 * @rst_src: if called due to reset, specifies the reset source
4366 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4367 * @cd: pointer to command details structure or NULL
4369 * This function removes queues and their corresponding nodes in SW DB
4372 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4373 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4374 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4375 struct ice_sq_cd *cd)
4377 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4378 struct ice_aqc_dis_txq_item *qg_list;
4379 struct ice_q_ctx *q_ctx;
4383 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4389 /* if queue is disabled already yet the disable queue command
4390 * has to be sent to complete the VF reset, then call
4391 * ice_aq_dis_lan_txq without any queue information
4394 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4399 buf_size = ice_struct_size(qg_list, q_id, 1);
4400 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
4402 return ICE_ERR_NO_MEMORY;
4404 ice_acquire_lock(&pi->sched_lock);
4406 for (i = 0; i < num_queues; i++) {
4407 struct ice_sched_node *node;
4409 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4412 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4414 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
4418 if (q_ctx->q_handle != q_handles[i]) {
4419 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4420 q_ctx->q_handle, q_handles[i]);
4423 qg_list->parent_teid = node->info.parent_teid;
4424 qg_list->num_qs = 1;
4425 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
4426 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4429 if (status != ICE_SUCCESS)
4431 ice_free_sched_node(pi, node);
4432 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4434 ice_release_lock(&pi->sched_lock);
4435 ice_free(hw, qg_list);
4440 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4441 * @pi: port information structure
4442 * @vsi_handle: software VSI handle
4443 * @tc_bitmap: TC bitmap
4444 * @maxqs: max queues array per TC
4445 * @owner: LAN or RDMA
4447 * This function adds/updates the VSI queues per TC.
4449 static enum ice_status
4450 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4451 u16 *maxqs, u8 owner)
4453 enum ice_status status = ICE_SUCCESS;
4456 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4459 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4460 return ICE_ERR_PARAM;
4462 ice_acquire_lock(&pi->sched_lock);
4464 ice_for_each_traffic_class(i) {
4465 /* configuration is possible only if TC node is present */
4466 if (!ice_sched_get_tc_node(pi, i))
4469 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4470 ice_is_tc_ena(tc_bitmap, i));
4475 ice_release_lock(&pi->sched_lock);
4480 * ice_cfg_vsi_lan - configure VSI LAN queues
4481 * @pi: port information structure
4482 * @vsi_handle: software VSI handle
4483 * @tc_bitmap: TC bitmap
4484 * @max_lanqs: max LAN queues array per TC
4486 * This function adds/updates the VSI LAN queues per TC.
4489 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4492 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4493 ICE_SCHED_NODE_OWNER_LAN);
4497 * ice_is_main_vsi - checks whether the VSI is main VSI
4498 * @hw: pointer to the HW struct
4499 * @vsi_handle: VSI handle
4501 * Checks whether the VSI is the main VSI (the first PF VSI created on
4504 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
4506 return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
4510 * ice_replay_pre_init - replay pre initialization
4511 * @hw: pointer to the HW struct
4512 * @sw: pointer to switch info struct for which function initializes filters
4514 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4516 static enum ice_status
4517 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
4519 enum ice_status status;
4522 /* Delete old entries from replay filter list head if there is any */
4523 ice_rm_sw_replay_rule_info(hw, sw);
4524 /* In start of replay, move entries into replay_rules list, it
4525 * will allow adding rules entries back to filt_rules list,
4526 * which is operational list.
4528 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
4529 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
4530 &sw->recp_list[i].filt_replay_rules);
4531 ice_sched_replay_agg_vsi_preinit(hw);
4533 status = ice_sched_replay_root_node_bw(hw->port_info);
4537 return ice_sched_replay_tc_node_bw(hw->port_info);
4541 * ice_replay_vsi - replay VSI configuration
4542 * @hw: pointer to the HW struct
4543 * @vsi_handle: driver VSI handle
4545 * Restore all VSI configuration after reset. It is required to call this
4546 * function with main VSI first.
4548 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4550 struct ice_switch_info *sw = hw->switch_info;
4551 struct ice_port_info *pi = hw->port_info;
4552 enum ice_status status;
4554 if (!ice_is_vsi_valid(hw, vsi_handle))
4555 return ICE_ERR_PARAM;
4557 /* Replay pre-initialization if there is any */
4558 if (ice_is_main_vsi(hw, vsi_handle)) {
4559 status = ice_replay_pre_init(hw, sw);
4563 /* Replay per VSI all RSS configurations */
4564 status = ice_replay_rss_cfg(hw, vsi_handle);
4567 /* Replay per VSI all filters */
4568 status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
4570 status = ice_replay_vsi_agg(hw, vsi_handle);
4575 * ice_replay_post - post replay configuration cleanup
4576 * @hw: pointer to the HW struct
4578 * Post replay cleanup.
4580 void ice_replay_post(struct ice_hw *hw)
4582 /* Delete old entries from replay filter list head */
4583 ice_rm_all_sw_replay_rule_info(hw);
4584 ice_sched_replay_agg(hw);
4588 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4589 * @hw: ptr to the hardware info
4590 * @reg: offset of 64 bit HW register to read from
4591 * @prev_stat_loaded: bool to specify if previous stats are loaded
4592 * @prev_stat: ptr to previous loaded stat value
4593 * @cur_stat: ptr to current stat value
4596 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4597 u64 *prev_stat, u64 *cur_stat)
4599 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4601 /* device stats are not reset at PFR, they likely will not be zeroed
4602 * when the driver starts. Thus, save the value from the first read
4603 * without adding to the statistic value so that we report stats which
4604 * count up from zero.
4606 if (!prev_stat_loaded) {
4607 *prev_stat = new_data;
4611 /* Calculate the difference between the new and old values, and then
4612 * add it to the software stat value.
4614 if (new_data >= *prev_stat)
4615 *cur_stat += new_data - *prev_stat;
4617 /* to manage the potential roll-over */
4618 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4620 /* Update the previously stored value to prepare for next read */
4621 *prev_stat = new_data;
4625 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4626 * @hw: ptr to the hardware info
4627 * @reg: offset of HW register to read from
4628 * @prev_stat_loaded: bool to specify if previous stats are loaded
4629 * @prev_stat: ptr to previous loaded stat value
4630 * @cur_stat: ptr to current stat value
4633 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4634 u64 *prev_stat, u64 *cur_stat)
4638 new_data = rd32(hw, reg);
4640 /* device stats are not reset at PFR, they likely will not be zeroed
4641 * when the driver starts. Thus, save the value from the first read
4642 * without adding to the statistic value so that we report stats which
4643 * count up from zero.
4645 if (!prev_stat_loaded) {
4646 *prev_stat = new_data;
4650 /* Calculate the difference between the new and old values, and then
4651 * add it to the software stat value.
4653 if (new_data >= *prev_stat)
4654 *cur_stat += new_data - *prev_stat;
4656 /* to manage the potential roll-over */
4657 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4659 /* Update the previously stored value to prepare for next read */
4660 *prev_stat = new_data;
4664 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
4665 * @hw: ptr to the hardware info
4666 * @vsi_handle: VSI handle
4667 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
4668 * @cur_stats: ptr to current stats structure
4670 * The GLV_REPC statistic register actually tracks two 16bit statistics, and
4671 * thus cannot be read using the normal ice_stat_update32 function.
4673 * Read the GLV_REPC register associated with the given VSI, and update the
4674 * rx_no_desc and rx_error values in the ice_eth_stats structure.
4676 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
4677 * cleared each time it's read.
4679 * Note that the GLV_RDPC register also counts the causes that would trigger
4680 * GLV_REPC. However, it does not give the finer grained detail about why the
4681 * packets are being dropped. The GLV_REPC values can be used to distinguish
4682 * whether Rx packets are dropped due to errors or due to no available
4686 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
4687 struct ice_eth_stats *cur_stats)
4689 u16 vsi_num, no_desc, error_cnt;
4692 if (!ice_is_vsi_valid(hw, vsi_handle))
4695 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
4697 /* If we haven't loaded stats yet, just clear the current value */
4698 if (!prev_stat_loaded) {
4699 wr32(hw, GLV_REPC(vsi_num), 0);
4703 repc = rd32(hw, GLV_REPC(vsi_num));
4704 no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
4705 error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
4707 /* Clear the count by writing to the stats register */
4708 wr32(hw, GLV_REPC(vsi_num), 0);
4710 cur_stats->rx_no_desc += no_desc;
4711 cur_stats->rx_errors += error_cnt;
4715 * ice_sched_query_elem - query element information from HW
4716 * @hw: pointer to the HW struct
4717 * @node_teid: node TEID to be queried
4718 * @buf: buffer to element information
4720 * This function queries HW element information
4723 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4724 struct ice_aqc_txsched_elem_data *buf)
4726 u16 buf_size, num_elem_ret = 0;
4727 enum ice_status status;
4729 buf_size = sizeof(*buf);
4730 ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
4731 buf->node_teid = CPU_TO_LE32(node_teid);
4732 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4734 if (status != ICE_SUCCESS || num_elem_ret != 1)
4735 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4740 * ice_get_fw_mode - returns FW mode
4741 * @hw: pointer to the HW struct
4743 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
4745 #define ICE_FW_MODE_DBG_M BIT(0)
4746 #define ICE_FW_MODE_REC_M BIT(1)
4747 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
4750 /* check the current FW mode */
4751 fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
4753 if (fw_mode & ICE_FW_MODE_DBG_M)
4754 return ICE_FW_MODE_DBG;
4755 else if (fw_mode & ICE_FW_MODE_REC_M)
4756 return ICE_FW_MODE_REC;
4757 else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
4758 return ICE_FW_MODE_ROLLBACK;
4760 return ICE_FW_MODE_NORMAL;
4764 * ice_fw_supports_link_override
4765 * @hw: pointer to the hardware structure
4767 * Checks if the firmware supports link override
4769 bool ice_fw_supports_link_override(struct ice_hw *hw)
4771 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4772 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4774 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4775 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4777 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4785 * ice_get_link_default_override
4786 * @ldo: pointer to the link default override struct
4787 * @pi: pointer to the port info struct
4789 * Gets the link default override for a port
4792 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4793 struct ice_port_info *pi)
4795 u16 i, tlv, tlv_len, tlv_start, buf, offset;
4796 struct ice_hw *hw = pi->hw;
4797 enum ice_status status;
4799 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4800 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4802 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
4806 /* Each port has its own config; calculate for our port */
4807 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4808 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4810 /* link options first */
4811 status = ice_read_sr_word(hw, tlv_start, &buf);
4813 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4816 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4817 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4818 ICE_LINK_OVERRIDE_PHY_CFG_S;
4820 /* link PHY config */
4821 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4822 status = ice_read_sr_word(hw, offset, &buf);
4824 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
4827 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4830 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4831 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4832 status = ice_read_sr_word(hw, (offset + i), &buf);
4834 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4837 /* shift 16 bits at a time to fill 64 bits */
4838 ldo->phy_type_low |= ((u64)buf << (i * 16));
4841 /* PHY types high */
4842 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4843 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4844 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4845 status = ice_read_sr_word(hw, (offset + i), &buf);
4847 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4850 /* shift 16 bits at a time to fill 64 bits */
4851 ldo->phy_type_high |= ((u64)buf << (i * 16));
4858 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
4859 * @caps: get PHY capability data
4861 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4863 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4864 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4865 ICE_AQC_PHY_AN_EN_CLAUSE73 |
4866 ICE_AQC_PHY_AN_EN_CLAUSE37))
4873 * ice_aq_set_lldp_mib - Set the LLDP MIB
4874 * @hw: pointer to the HW struct
4875 * @mib_type: Local, Remote or both Local and Remote MIBs
4876 * @buf: pointer to the caller-supplied buffer to store the MIB block
4877 * @buf_size: size of the buffer (in bytes)
4878 * @cd: pointer to command details structure or NULL
4880 * Set the LLDP MIB. (0x0A08)
4883 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
4884 struct ice_sq_cd *cd)
4886 struct ice_aqc_lldp_set_local_mib *cmd;
4887 struct ice_aq_desc desc;
4889 cmd = &desc.params.lldp_set_mib;
4891 if (buf_size == 0 || !buf)
4892 return ICE_ERR_PARAM;
4894 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
4896 desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
4897 desc.datalen = CPU_TO_LE16(buf_size);
4899 cmd->type = mib_type;
4900 cmd->length = CPU_TO_LE16(buf_size);
4902 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4906 * ice_fw_supports_lldp_fltr - check NVM version supports lldp_fltr_ctrl
4907 * @hw: pointer to HW struct
4909 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
4911 if (hw->mac_type != ICE_MAC_E810)
4914 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
4915 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
4917 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
4918 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
4920 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
4927 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
4928 * @hw: pointer to HW struct
4929 * @vsi_num: absolute HW index for VSI
4930 * @add: boolean for if adding or removing a filter
4933 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
4935 struct ice_aqc_lldp_filter_ctrl *cmd;
4936 struct ice_aq_desc desc;
4938 cmd = &desc.params.lldp_filter_ctrl;
4940 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
4943 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
4945 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
4947 cmd->vsi_num = CPU_TO_LE16(vsi_num);
4949 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4953 * ice_fw_supports_report_dflt_cfg
4954 * @hw: pointer to the hardware structure
4956 * Checks if the firmware supports report default configuration
4958 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
4960 if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
4961 if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
4963 if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
4964 hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
4966 } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {