/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	300
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
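
/* Illustrative note (not part of the original source): ice_clear_pf_cfg()
 * above shows the minimal pattern every direct (non-buffered) admin queue
 * command in this file follows: fill a default descriptor with the opcode,
 * optionally set fields in desc.params, then send with no data buffer.
 * A hypothetical direct command would be issued the same way:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, some_opcode);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */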
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_aq_discover_caps is expected to be called before this function is
 * called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_DMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_DMA_TO_NONDMA);
			break;
		}
	return ICE_SUCCESS;
}
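
/* Usage sketch (illustrative only): callers size the response buffer for the
 * up-to-two (LAN and WoL) address entries a port can report, as ice_init_hw()
 * below does:
 *
 *	struct ice_aqc_manage_mac_read_resp buf[2];
 *
 *	if (!ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL)) {
 *		// hw->port_info->mac.lan_addr and mac.perm_addr are now valid
 *	}
 */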
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "	extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
			   sizeof(pi->phy.link_info.module_type),
			   ICE_NONDMA_TO_NONDMA);
	}

	return status;
}
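
/* Usage sketch (illustrative only): mirrors the call made in ice_init_hw()
 * below, which also lets the TOPO_CAP report populate pi->phy:
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *
 *	status = ice_aq_get_phy_caps(hw->port_info, false,
 *				     ICE_AQC_REPORT_TOPO_CAP, &pcaps, NULL);
 */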
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}
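
/* Usage sketch (illustrative only): a caller that only wants the cached
 * status refreshed can pass NULL for the optional link pointer, as
 * ice_init_hw() does below:
 *
 *	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
 *	if (!status)
 *		speed = hw->port_info->phy.link_info.link_speed;
 */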
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and fc threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and fc
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}
/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = (struct ice_switch_info *)
		ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
	if (status) {
		ice_free(hw, hw->switch_info);
		return status;
	}

	return ICE_SUCCESS;
}
/**
 * ice_cleanup_fltr_mgmt_single - clears single filter management struct
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	if (!sw)
		return;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_sw_replay_rule_info(hw, sw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}
/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}
/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_nvm_info *nvm = &hw->nvm;
	struct ice_orom_info *orom;

	orom = &nvm->orom;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
			ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);
	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
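
/* Lifecycle sketch (illustrative only): ice_init_hw() and ice_deinit_hw()
 * are intended to pair around nominal operation of the device:
 *
 *	enum ice_status status = ice_init_hw(hw);
 *
 *	if (status)
 *		return status;
 *	// ... configure VSIs, queues, filters, run traffic ...
 *	ice_deinit_hw(hw);
 */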
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	/* Compare against the full loop bound above; checking only against
	 * ICE_PF_RESET_WAIT_COUNT would never detect an exhausted loop.
	 */
	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}
/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
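
/* Usage sketch (illustrative only; field values are made up): a caller fills
 * the sparse context, and ice_write_rxq_ctx() packs it per
 * ice_rlan_ctx_info[] and writes the dense dwords to QRX_CONTEXT:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;	// base is in 128-byte units
 *	rlan_ctx.qlen = ring_size;	// hypothetical ring parameters
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */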
/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};
/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}
/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};
/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}
/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}
/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}
/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};
/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
		    ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}
/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}
/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	if (hw->aq_send_cmd_fn) {
		enum ice_status status = ICE_ERR_NOT_READY;
		u16 retval = ICE_AQ_RC_OK;

		ice_acquire_lock(&hw->adminq.sq_lock);
		if (!hw->aq_send_cmd_fn(hw->aq_send_cmd_param, desc,
					buf, buf_size)) {
			retval = LE16_TO_CPU(desc->retval);
			/* strip off FW internal code */
			if (retval)
				retval &= 0xff;
			if (retval == ICE_AQ_RC_OK)
				status = ICE_SUCCESS;
			else
				status = ICE_ERR_AQ_ERROR;
		}

		hw->adminq.sq_last_status = (enum ice_aq_err)retval;
		ice_release_lock(&hw->adminq.sq_lock);

		return status;
	}
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}
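
/* Note (illustrative only): hw->aq_send_cmd_fn appears to let a caller
 * divert admin queue traffic through its own transport instead of this
 * PF's admin queue; when it is unset, commands go through
 * ice_sq_send_cmd() as usual. A hypothetical setup would be:
 *
 *	hw->aq_send_cmd_fn = my_proxy_send_fn;	// made-up callback name
 *	hw->aq_send_cmd_param = my_ctx;		// made-up context pointer
 */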
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = LE32_TO_CPU(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}
/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = CPU_TO_LE16(res);
	cmd_resp->access_type = CPU_TO_LE16(access);
	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
	cmd_resp->timeout = CPU_TO_LE32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_SUCCESS;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = LE32_TO_CPU(cmd_resp->timeout);

	return status;
}
/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = CPU_TO_LE16(res);
	cmd->res_number = CPU_TO_LE32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		ice_msec_delay(delay, true);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		ice_msec_delay(1, true);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
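
/* Usage sketch (illustrative only): the acquire/release pair brackets any
 * work that needs exclusive ownership of a shared resource, e.g. the NVM:
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *				 ICE_NVM_TIMEOUT);	// timeout value assumed
 *	if (!status) {
 *		// ... read NVM ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 */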
/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_entries = CPU_TO_LE16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = ice_struct_size(buf, elem, num - 1);
	buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = CPU_TO_LE16(num);
	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
		   ICE_NONDMA_TO_NONDMA);

ice_alloc_res_exit:
	ice_free(hw, buf);
	return status;
}
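
/* Usage sketch (illustrative only): allocate a handful of resource entries
 * of a given type, then return them with ice_free_hw_res() below:
 *
 *	u16 ids[4];
 *
 *	if (!ice_alloc_hw_res(hw, type, 4, false, ids)) {
 *		// ... use ids[0..3] ...
 *		ice_free_hw_res(hw, type, 4, ids);
 *	}
 */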
/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = ice_struct_size(buf, elem, num - 1);
	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = CPU_TO_LE16(num);
	buf->res_type = CPU_TO_LE16(type);
	ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
		   ICE_NONDMA_TO_NONDMA);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	ice_free(hw, buf);
	return status;
}
/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
			     ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
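
/* Worked example (illustrative only): with a valid_functions bitmap of 0x0F
 * (four active PFs), ice_hweight8() yields 4, so a device-wide maximum of
 * 768 VSIs would give each PF 768 / 4 = 192 guaranteed VSIs.
 */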
1824 * ice_parse_caps - parse function/device capabilities
1825 * @hw: pointer to the HW struct
1826 * @buf: pointer to a buffer containing function/device capability records
1827 * @cap_count: number of capability records in the list
1828 * @opc: type of capabilities list to parse
1830 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
1833 ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
1834 enum ice_adminq_opc opc)
1836 struct ice_aqc_list_caps_elem *cap_resp;
1837 struct ice_hw_func_caps *func_p = NULL;
1838 struct ice_hw_dev_caps *dev_p = NULL;
1839 struct ice_hw_common_caps *caps;
1846 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
1848 if (opc == ice_aqc_opc_list_dev_caps) {
1849 dev_p = &hw->dev_caps;
1850 caps = &dev_p->common_cap;
1852 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
1855 } else if (opc == ice_aqc_opc_list_func_caps) {
1856 func_p = &hw->func_caps;
1857 caps = &func_p->common_cap;
1859 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
1861 prefix = "func cap";
1863 ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
1867 for (i = 0; caps && i < cap_count; i++, cap_resp++) {
1868 u32 logical_id = LE32_TO_CPU(cap_resp->logical_id);
1869 u32 phys_id = LE32_TO_CPU(cap_resp->phys_id);
1870 u32 number = LE32_TO_CPU(cap_resp->number);
1871 u16 cap = LE16_TO_CPU(cap_resp->cap);
1874 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1875 caps->valid_functions = number;
1876 ice_debug(hw, ICE_DBG_INIT,
1877 "%s: valid_functions (bitmap) = %d\n", prefix,
1878 caps->valid_functions);
1880 /* store func count for resource management purposes */
1882 dev_p->num_funcs = ice_hweight32(number);
1884 case ICE_AQC_CAPS_VSI:
1886 dev_p->num_vsi_allocd_to_host = number;
1887 ice_debug(hw, ICE_DBG_INIT,
1888 "%s: num_vsi_allocd_to_host = %d\n",
1890 dev_p->num_vsi_allocd_to_host);
1891 } else if (func_p) {
1892 func_p->guar_num_vsi =
1893 ice_get_num_per_func(hw, ICE_MAX_VSI);
1894 ice_debug(hw, ICE_DBG_INIT,
1895 "%s: guar_num_vsi (fw) = %d\n",
1897 ice_debug(hw, ICE_DBG_INIT,
1898 "%s: guar_num_vsi = %d\n",
1899 prefix, func_p->guar_num_vsi);
1902 case ICE_AQC_CAPS_DCB:
1903 caps->dcb = (number == 1);
1904 caps->active_tc_bitmap = logical_id;
1905 caps->maxtc = phys_id;
1906 ice_debug(hw, ICE_DBG_INIT,
1907 "%s: dcb = %d\n", prefix, caps->dcb);
1908 ice_debug(hw, ICE_DBG_INIT,
1909 "%s: active_tc_bitmap = %d\n", prefix,
1910 caps->active_tc_bitmap);
1911 ice_debug(hw, ICE_DBG_INIT,
1912 "%s: maxtc = %d\n", prefix, caps->maxtc);
1914 case ICE_AQC_CAPS_RSS:
1915 caps->rss_table_size = number;
1916 caps->rss_table_entry_width = logical_id;
1917 ice_debug(hw, ICE_DBG_INIT,
1918 "%s: rss_table_size = %d\n", prefix,
1919 caps->rss_table_size);
1920 ice_debug(hw, ICE_DBG_INIT,
1921 "%s: rss_table_entry_width = %d\n", prefix,
1922 caps->rss_table_entry_width);
1924 case ICE_AQC_CAPS_RXQS:
1925 caps->num_rxq = number;
1926 caps->rxq_first_id = phys_id;
1927 ice_debug(hw, ICE_DBG_INIT,
1928 "%s: num_rxq = %d\n", prefix,
1930 ice_debug(hw, ICE_DBG_INIT,
1931 "%s: rxq_first_id = %d\n", prefix,
1932 caps->rxq_first_id);
1934 case ICE_AQC_CAPS_TXQS:
1935 caps->num_txq = number;
1936 caps->txq_first_id = phys_id;
1937 ice_debug(hw, ICE_DBG_INIT,
1938 "%s: num_txq = %d\n", prefix,
1940 ice_debug(hw, ICE_DBG_INIT,
1941 "%s: txq_first_id = %d\n", prefix,
1942 caps->txq_first_id);
1944 case ICE_AQC_CAPS_MSIX:
1945 caps->num_msix_vectors = number;
1946 caps->msix_vector_first_id = phys_id;
1947 ice_debug(hw, ICE_DBG_INIT,
1948 "%s: num_msix_vectors = %d\n", prefix,
1949 caps->num_msix_vectors);
1950 ice_debug(hw, ICE_DBG_INIT,
1951 "%s: msix_vector_first_id = %d\n", prefix,
1952 caps->msix_vector_first_id);
1954 case ICE_AQC_CAPS_FD:
1956 dev_p->num_flow_director_fltr = number;
1957 ice_debug(hw, ICE_DBG_INIT,
1958 "%s: num_flow_director_fltr = %d\n",
1960 dev_p->num_flow_director_fltr);
1965 if (hw->dcf_enabled)
1967 reg_val = rd32(hw, GLQF_FD_SIZE);
1968 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
1969 GLQF_FD_SIZE_FD_GSIZE_S;
1970 func_p->fd_fltr_guar =
1971 ice_get_num_per_func(hw, val);
1972 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
1973 GLQF_FD_SIZE_FD_BSIZE_S;
1974 func_p->fd_fltr_best_effort = val;
1975 ice_debug(hw, ICE_DBG_INIT,
1976 "%s: fd_fltr_guar = %d\n",
1977 prefix, func_p->fd_fltr_guar);
1978 ice_debug(hw, ICE_DBG_INIT,
1979 "%s: fd_fltr_best_effort = %d\n",
1980 prefix, func_p->fd_fltr_best_effort);
1983 case ICE_AQC_CAPS_MAX_MTU:
1984 caps->max_mtu = number;
1985 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
1986 prefix, caps->max_mtu);
1989 ice_debug(hw, ICE_DBG_INIT,
1990 "%s: unknown capability[%d]: 0x%x\n", prefix,
1996 /* Re-calculate capabilities that are dependent on the number of
1997 * physical ports; i.e. some features are not supported or function
1998 * differently on devices with more than 4 ports.
2000 if (hw->dev_caps.num_funcs > 4) {
2001 /* Max 4 TCs per port */
2002 caps->maxtc = 4;
2003 ice_debug(hw, ICE_DBG_INIT,
2004 "%s: maxtc = %d (based on #ports)\n", prefix,
2005 caps->maxtc);
2010 * ice_aq_list_caps - query function/device capabilities
2011 * @hw: pointer to the HW struct
2012 * @buf: a buffer to hold the capabilities
2013 * @buf_size: size of the buffer
2014 * @cap_count: if not NULL, set to the number of capabilities reported
2015 * @opc: capabilities type to discover, device or function
2016 * @cd: pointer to command details structure or NULL
2018 * Get the function (0x000A) or device (0x000B) capabilities description from
2019 * firmware and store it in the buffer.
2021 * If the cap_count pointer is not NULL, then it is set to the number of
2022 * capabilities firmware will report. Note that if the buffer size is too
2023 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2024 * cap_count will still be updated in this case. It is recommended that the
2025 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2026 * firmware could return) to avoid this.
2028 static enum ice_status
2029 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2030 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2032 struct ice_aqc_list_caps *cmd;
2033 struct ice_aq_desc desc;
2034 enum ice_status status;
2036 cmd = &desc.params.get_cap;
2038 if (opc != ice_aqc_opc_list_func_caps &&
2039 opc != ice_aqc_opc_list_dev_caps)
2040 return ICE_ERR_PARAM;
2042 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2043 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2045 if (cap_count)
2046 *cap_count = LE32_TO_CPU(cmd->count);
2052 * ice_aq_discover_caps - query function/device capabilities
2053 * @hw: pointer to the HW struct
2054 * @buf: a virtual buffer to hold the capabilities
2055 * @buf_size: Size of the virtual buffer
2056 * @cap_count: where firmware's capability count is stored if the AQ returns ENOMEM
2057 * @opc: capabilities type to discover - pass in the command opcode
2058 * @cd: pointer to command details structure or NULL
2060 * Get the function (0x000a) or device (0x000b) capabilities description
2061 * from firmware.
2063 * NOTE: this function has the side effect of updating the hw->dev_caps or
2064 * hw->func_caps by way of calling ice_parse_caps.
2066 static enum ice_status
2067 ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2068 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2070 u32 local_cap_count = 0;
2071 enum ice_status status;
2073 status = ice_aq_list_caps(hw, buf, buf_size, &local_cap_count,
2076 ice_parse_caps(hw, buf, local_cap_count, opc);
2077 else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
2078 *cap_count = local_cap_count;
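/* Illustrative sketch, not part of the driver: one way a caller could use the
 * cap_count side channel documented above to retry with a right-sized buffer
 * after firmware reports ICE_AQ_RC_ENOMEM. The helper name is hypothetical;
 * ice_discover_caps() below sidesteps the retry by always passing
 * ICE_AQ_MAX_BUF_LEN.
 */
static enum ice_status example_retry_discover_caps(struct ice_hw *hw)
{
	u32 cap_count = 0;
	enum ice_status status;
	u16 buf_size = 64; /* deliberately undersized first attempt */
	void *buf = ice_malloc(hw, buf_size);

	if (!buf)
		return ICE_ERR_NO_MEMORY;
	status = ice_aq_discover_caps(hw, buf, buf_size, &cap_count,
				      ice_aqc_opc_list_func_caps, NULL);
	if (status && hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM) {
		/* cap_count now holds the count firmware wanted to return */
		ice_free(hw, buf);
		buf_size = (u16)(cap_count *
				 sizeof(struct ice_aqc_list_caps_elem));
		buf = ice_malloc(hw, buf_size);
		if (!buf)
			return ICE_ERR_NO_MEMORY;
		status = ice_aq_discover_caps(hw, buf, buf_size, &cap_count,
					      ice_aqc_opc_list_func_caps,
					      NULL);
	}
	ice_free(hw, buf);
	return status;
}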
2084 * ice_discover_caps - get info about the HW
2085 * @hw: pointer to the hardware structure
2086 * @opc: capabilities type to discover - pass in the command opcode
2088 static enum ice_status
2089 ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
2091 enum ice_status status;
2095 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2096 if (!cbuf)
2097 return ICE_ERR_NO_MEMORY;
2099 /* Although the driver doesn't know the number of capabilities the
2100 * device will return, we can simply send a 4KB buffer, the maximum
2101 * possible size that firmware can return.
2103 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2105 status = ice_aq_discover_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2113 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2114 * @hw: pointer to the hardware structure
2116 void ice_set_safe_mode_caps(struct ice_hw *hw)
2118 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2119 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2120 u32 valid_func, rxq_first_id, txq_first_id;
2121 u32 msix_vector_first_id, max_mtu;
2124 /* cache some func_caps values that should be restored after memset */
2125 valid_func = func_caps->common_cap.valid_functions;
2126 txq_first_id = func_caps->common_cap.txq_first_id;
2127 rxq_first_id = func_caps->common_cap.rxq_first_id;
2128 msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
2129 max_mtu = func_caps->common_cap.max_mtu;
2131 /* unset func capabilities */
2132 memset(func_caps, 0, sizeof(*func_caps));
2134 /* restore cached values */
2135 func_caps->common_cap.valid_functions = valid_func;
2136 func_caps->common_cap.txq_first_id = txq_first_id;
2137 func_caps->common_cap.rxq_first_id = rxq_first_id;
2138 func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2139 func_caps->common_cap.max_mtu = max_mtu;
2141 /* one Tx and one Rx queue in safe mode */
2142 func_caps->common_cap.num_rxq = 1;
2143 func_caps->common_cap.num_txq = 1;
2145 /* two MSIX vectors, one for traffic and one for misc causes */
2146 func_caps->common_cap.num_msix_vectors = 2;
2147 func_caps->guar_num_vsi = 1;
2149 /* cache some dev_caps values that should be restored after memset */
2150 valid_func = dev_caps->common_cap.valid_functions;
2151 txq_first_id = dev_caps->common_cap.txq_first_id;
2152 rxq_first_id = dev_caps->common_cap.rxq_first_id;
2153 msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
2154 max_mtu = dev_caps->common_cap.max_mtu;
2155 num_funcs = dev_caps->num_funcs;
2157 /* unset dev capabilities */
2158 memset(dev_caps, 0, sizeof(*dev_caps));
2160 /* restore cached values */
2161 dev_caps->common_cap.valid_functions = valid_func;
2162 dev_caps->common_cap.txq_first_id = txq_first_id;
2163 dev_caps->common_cap.rxq_first_id = rxq_first_id;
2164 dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2165 dev_caps->common_cap.max_mtu = max_mtu;
2166 dev_caps->num_funcs = num_funcs;
2168 /* one Tx and one Rx queue per function in safe mode */
2169 dev_caps->common_cap.num_rxq = num_funcs;
2170 dev_caps->common_cap.num_txq = num_funcs;
2172 /* two MSIX vectors per function */
2173 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2177 * ice_get_caps - get info about the HW
2178 * @hw: pointer to the hardware structure
2180 enum ice_status ice_get_caps(struct ice_hw *hw)
2182 enum ice_status status;
2184 status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
2186 status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
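/* Illustrative sketch with a hypothetical helper name: once ice_get_caps()
 * succeeds, the values parsed by ice_parse_caps() are available directly
 * from the HW structure.
 */
static void example_dump_caps(struct ice_hw *hw)
{
	if (ice_get_caps(hw))
		return;
	ice_debug(hw, ICE_DBG_INIT, "func: %d txq, %d rxq; dev: %d funcs\n",
		  hw->func_caps.common_cap.num_txq,
		  hw->func_caps.common_cap.num_rxq,
		  hw->dev_caps.num_funcs);
}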
2192 * ice_aq_manage_mac_write - manage MAC address write command
2193 * @hw: pointer to the HW struct
2194 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2195 * @flags: flags to control write behavior
2196 * @cd: pointer to command details structure or NULL
2198 * This function is used to write MAC address to the NVM (0x0108).
2201 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2202 struct ice_sq_cd *cd)
2204 struct ice_aqc_manage_mac_write *cmd;
2205 struct ice_aq_desc desc;
2207 cmd = &desc.params.mac_write;
2208 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2211 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA);
2213 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2217 * ice_aq_clear_pxe_mode
2218 * @hw: pointer to the HW struct
2220 * Tell the firmware that the driver is taking over from PXE (0x0110).
2222 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2224 struct ice_aq_desc desc;
2226 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2227 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2229 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2233 * ice_clear_pxe_mode - clear pxe operations mode
2234 * @hw: pointer to the HW struct
2236 * Make sure all PXE mode settings are cleared, including things
2237 * like descriptor fetch/write-back mode.
2239 void ice_clear_pxe_mode(struct ice_hw *hw)
2241 if (ice_check_sq_alive(hw, &hw->adminq))
2242 ice_aq_clear_pxe_mode(hw);
2246 * ice_get_link_speed_based_on_phy_type - returns link speed
2247 * @phy_type_low: lower part of phy_type
2248 * @phy_type_high: higher part of phy_type
2250 * This helper function will convert an entry in PHY type structure
2251 * [phy_type_low, phy_type_high] to its corresponding link speed.
2252 * Note: In the structure of [phy_type_low, phy_type_high], there should
2253 * be one bit set, as this function will convert one PHY type to its
2254 * speed.
2255 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2256 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
2259 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2261 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2262 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2264 switch (phy_type_low) {
2265 case ICE_PHY_TYPE_LOW_100BASE_TX:
2266 case ICE_PHY_TYPE_LOW_100M_SGMII:
2267 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2269 case ICE_PHY_TYPE_LOW_1000BASE_T:
2270 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2271 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2272 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2273 case ICE_PHY_TYPE_LOW_1G_SGMII:
2274 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2276 case ICE_PHY_TYPE_LOW_2500BASE_T:
2277 case ICE_PHY_TYPE_LOW_2500BASE_X:
2278 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2279 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2281 case ICE_PHY_TYPE_LOW_5GBASE_T:
2282 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2283 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2285 case ICE_PHY_TYPE_LOW_10GBASE_T:
2286 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2287 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2288 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2289 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2290 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2291 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2292 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2294 case ICE_PHY_TYPE_LOW_25GBASE_T:
2295 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2296 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2297 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2298 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2299 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2300 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2301 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2302 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2303 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2304 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2305 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2307 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2308 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2309 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2310 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2311 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2312 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2313 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2315 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2316 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2317 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2318 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2319 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2320 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2321 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2322 case ICE_PHY_TYPE_LOW_50G_AUI2:
2323 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2324 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2325 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2326 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2327 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2328 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2329 case ICE_PHY_TYPE_LOW_50G_AUI1:
2330 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2332 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2333 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2334 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2335 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2336 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2337 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2338 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2339 case ICE_PHY_TYPE_LOW_100G_AUI4:
2340 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2341 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2342 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2343 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2344 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2345 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2348 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2352 switch (phy_type_high) {
2353 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2354 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2355 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2356 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2357 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2358 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2361 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2365 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2366 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2367 return ICE_AQ_LINK_SPEED_UNKNOWN;
2368 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2369 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2370 return ICE_AQ_LINK_SPEED_UNKNOWN;
2371 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2372 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2373 return speed_phy_type_low;
2375 return speed_phy_type_high;
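/* Illustrative sketch (hypothetical helper): exactly one bit may be set
 * across the [phy_type_low, phy_type_high] pair, so a 25GBase-CR PHY type
 * maps to ICE_AQ_LINK_SPEED_25GB.
 */
static u16 example_speed_for_25g_cr(void)
{
	return ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_CR,
						    0);
}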
2379 * ice_update_phy_type
2380 * @phy_type_low: pointer to the lower part of phy_type
2381 * @phy_type_high: pointer to the higher part of phy_type
2382 * @link_speeds_bitmap: targeted link speeds bitmap
2384 * Note: For the link_speeds_bitmap format, see
2385 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2386 * link_speeds_bitmap that includes multiple speeds.
2388 * Each entry in the [phy_type_low, phy_type_high] structure represents a
2389 * certain link speed. This helper function turns on the bits in the
2390 * [phy_type_low, phy_type_high] structure that correspond to the speeds
2391 * set in the link_speeds_bitmap input parameter.
2394 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2395 u16 link_speeds_bitmap)
2402 /* We first check with low part of phy_type */
2403 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2404 pt_low = BIT_ULL(index);
2405 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2407 if (link_speeds_bitmap & speed)
2408 *phy_type_low |= BIT_ULL(index);
2411 /* We then check with high part of phy_type */
2412 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2413 pt_high = BIT_ULL(index);
2414 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2416 if (link_speeds_bitmap & speed)
2417 *phy_type_high |= BIT_ULL(index);
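/* Illustrative sketch (hypothetical helper): build the PHY type masks that
 * advertise only 10G and 25G by letting ice_update_phy_type() walk every
 * PHY type bit and keep the ones whose speed is in the bitmap.
 */
static void example_mask_for_10g_and_25g(u64 *phy_type_low, u64 *phy_type_high)
{
	*phy_type_low = 0;
	*phy_type_high = 0;
	ice_update_phy_type(phy_type_low, phy_type_high,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
}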
2422 * ice_aq_set_phy_cfg
2423 * @hw: pointer to the HW struct
2424 * @pi: port info structure of the interested logical port
2425 * @cfg: structure with PHY configuration data to be set
2426 * @cd: pointer to command details structure or NULL
2428 * Set the various PHY configuration parameters supported on the Port.
2429 * One or more of the Set PHY config parameters may be ignored in an MFP
2430 * mode as the PF may not have the privilege to set some of the PHY Config
2431 * parameters. This status will be indicated by the command response (0x0601).
2434 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2435 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2437 struct ice_aq_desc desc;
2438 enum ice_status status;
2440 if (!cfg)
2441 return ICE_ERR_PARAM;
2443 /* Ensure that only valid bits of cfg->caps can be turned on. */
2444 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2445 ice_debug(hw, ICE_DBG_PHY,
2446 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2449 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2452 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2453 desc.params.set_phy.lport_num = pi->lport;
2454 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2456 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2457 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2458 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2459 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2460 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2461 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2462 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2463 cfg->low_power_ctrl_an);
2464 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2465 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2466 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2469 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2471 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2472 status = ICE_SUCCESS;
2475 pi->phy.curr_user_phy_cfg = *cfg;
2481 * ice_update_link_info - update status of the HW network link
2482 * @pi: port info structure of the interested logical port
2484 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2486 struct ice_link_status *li;
2487 enum ice_status status;
2489 if (!pi)
2490 return ICE_ERR_PARAM;
2492 li = &pi->phy.link_info;
2494 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2498 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2499 struct ice_aqc_get_phy_caps_data *pcaps;
2503 pcaps = (struct ice_aqc_get_phy_caps_data *)
2504 ice_malloc(hw, sizeof(*pcaps));
2505 if (!pcaps)
2506 return ICE_ERR_NO_MEMORY;
2508 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2511 ice_free(hw, pcaps);
2518 * ice_cache_phy_user_req
2519 * @pi: port information structure
2520 * @cache_data: PHY logging data
2521 * @cache_mode: PHY logging mode
2523 * Log the user request on (FC, FEC, SPEED) for later use.
2526 ice_cache_phy_user_req(struct ice_port_info *pi,
2527 struct ice_phy_cache_mode_data cache_data,
2528 enum ice_phy_cache_mode cache_mode)
2533 switch (cache_mode) {
2535 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2537 case ICE_SPEED_MODE:
2538 pi->phy.curr_user_speed_req =
2539 cache_data.data.curr_user_speed_req;
2542 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2550 * ice_caps_to_fc_mode
2551 * @caps: PHY capabilities
2553 * Convert PHY FC capabilities to ice FC mode
2555 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2557 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2558 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2561 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2562 return ICE_FC_TX_PAUSE;
2564 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2565 return ICE_FC_RX_PAUSE;
2571 * ice_caps_to_fec_mode
2572 * @caps: PHY capabilities
2573 * @fec_options: Link FEC options
2575 * Convert PHY FEC capabilities to ice FEC mode
2577 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2579 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2580 return ICE_FEC_AUTO;
2582 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2583 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2584 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2585 ICE_AQC_PHY_FEC_25G_KR_REQ))
2586 return ICE_FEC_BASER;
2588 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2589 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2590 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2593 return ICE_FEC_NONE;
2596 static enum ice_status
2597 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2598 enum ice_fc_mode req_mode)
2600 struct ice_aqc_get_phy_caps_data *pcaps = NULL;
2601 struct ice_phy_cache_mode_data cache_data;
2602 enum ice_status status = ICE_SUCCESS;
2603 u8 pause_mask = 0x0;
2605 if (!pi || !cfg)
2606 return ICE_ERR_BAD_PTR;
2608 pcaps = (struct ice_aqc_get_phy_caps_data *)
2609 ice_malloc(pi->hw, sizeof(*pcaps));
2610 if (!pcaps)
2611 return ICE_ERR_NO_MEMORY;
2613 /* Cache user FC request */
2614 cache_data.data.curr_user_fc_req = req_mode;
2615 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2619 /* Query the value of FC that both the NIC and the attached media
2620 * can support.
2622 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2627 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2628 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2631 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2632 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2634 case ICE_FC_RX_PAUSE:
2635 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2637 case ICE_FC_TX_PAUSE:
2638 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2644 /* clear the old pause settings */
2645 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2646 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2648 /* set the new capabilities */
2649 cfg->caps |= pause_mask;
2652 ice_free(pi->hw, pcaps);
2657 * ice_set_fc
2658 * @pi: port information structure
2659 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2660 * @ena_auto_link_update: enable automatic link update
2662 * Set the requested flow control mode.
2665 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2667 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2668 struct ice_aqc_get_phy_caps_data *pcaps;
2669 enum ice_status status;
2672 if (!pi || !aq_failures)
2673 return ICE_ERR_BAD_PTR;
2678 pcaps = (struct ice_aqc_get_phy_caps_data *)
2679 ice_malloc(hw, sizeof(*pcaps));
2680 if (!pcaps)
2681 return ICE_ERR_NO_MEMORY;
2683 /* Get the current PHY config */
2684 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2687 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2691 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
2693 /* Configure the set PHY data */
2694 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
2696 if (status != ICE_ERR_BAD_PTR)
2697 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
2702 /* If the capabilities have changed, then set the new config */
2703 if (cfg.caps != pcaps->caps) {
2704 int retry_count, retry_max = 10;
2706 /* Auto restart link so settings take effect */
2707 if (ena_auto_link_update)
2708 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2710 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
2712 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
2716 /* Update the link info
2717 * It sometimes takes a really long time for link to
2718 * come back from the atomic reset. Thus, we wait a
2719 * little while between retries.
2721 for (retry_count = 0; retry_count < retry_max; retry_count++) {
2722 status = ice_update_link_info(pi);
2724 if (status == ICE_SUCCESS)
2727 ice_msec_delay(100, true);
2731 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
2735 ice_free(hw, pcaps);
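/* Illustrative sketch (hypothetical helper): ice_set_fc() reads the requested
 * mode from pi->fc.req_mode, and aq_failures tells the caller which stage of
 * the get/set/update sequence failed.
 */
static enum ice_status example_force_rx_pause(struct ice_port_info *pi)
{
	u8 aq_failures = 0;
	enum ice_status status;

	pi->fc.req_mode = ICE_FC_RX_PAUSE;
	status = ice_set_fc(pi, &aq_failures, true);
	if (status)
		ice_debug(pi->hw, ICE_DBG_LINK,
			  "set_fc failed, stage 0x%x\n", aq_failures);
	return status;
}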
2740 * ice_phy_caps_equals_cfg
2741 * @phy_caps: PHY capabilities
2742 * @phy_cfg: PHY configuration
2744 * Helper function to determine if PHY capabilities match the PHY
2745 * configuration.
2748 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2749 struct ice_aqc_set_phy_cfg_data *phy_cfg)
2751 u8 caps_mask, cfg_mask;
2753 if (!phy_caps || !phy_cfg)
2756 /* These bits are not common between capabilities and configuration.
2757 * Do not use them to determine equality.
2759 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2760 ICE_AQC_PHY_EN_MOD_QUAL);
2761 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2763 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2764 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2765 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2766 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2767 phy_caps->eee_cap != phy_cfg->eee_cap ||
2768 phy_caps->eeer_value != phy_cfg->eeer_value ||
2769 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2776 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
2777 * @pi: port information structure
2778 * @caps: PHY ability structure to copy data from
2779 * @cfg: PHY configuration structure to copy data to
2781 * Helper function to copy AQC PHY get ability data to the PHY set
2782 * configuration data structure.
2785 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
2786 struct ice_aqc_get_phy_caps_data *caps,
2787 struct ice_aqc_set_phy_cfg_data *cfg)
2789 if (!pi || !caps || !cfg)
2792 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
2793 cfg->phy_type_low = caps->phy_type_low;
2794 cfg->phy_type_high = caps->phy_type_high;
2795 cfg->caps = caps->caps;
2796 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
2797 cfg->eee_cap = caps->eee_cap;
2798 cfg->eeer_value = caps->eeer_value;
2799 cfg->link_fec_opt = caps->link_fec_options;
2800 cfg->module_compliance_enforcement =
2801 caps->module_compliance_enforcement;
2803 if (ice_fw_supports_link_override(pi->hw)) {
2804 struct ice_link_default_override_tlv tlv;
2806 if (ice_get_link_default_override(&tlv, pi))
2809 if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
2810 cfg->module_compliance_enforcement |=
2811 ICE_LINK_OVERRIDE_STRICT_MODE;
2816 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
2817 * @pi: port information structure
2818 * @cfg: PHY configuration data to set FEC mode
2819 * @fec: FEC mode to configure
2822 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2823 enum ice_fec_mode fec)
2825 struct ice_aqc_get_phy_caps_data *pcaps;
2826 enum ice_status status = ICE_SUCCESS;
2829 if (!pi || !cfg)
2830 return ICE_ERR_BAD_PTR;
2834 pcaps = (struct ice_aqc_get_phy_caps_data *)
2835 ice_malloc(hw, sizeof(*pcaps));
2836 if (!pcaps)
2837 return ICE_ERR_NO_MEMORY;
2839 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
2844 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
2845 cfg->link_fec_opt = pcaps->link_fec_options;
2849 /* Clear RS bits, and AND BASE-R ability
2850 * bits and OR request bits.
2852 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2853 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2854 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2855 ICE_AQC_PHY_FEC_25G_KR_REQ;
2858 /* Clear BASE-R bits, and AND RS ability
2859 * bits and OR request bits.
2861 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2862 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2863 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2866 /* Clear all FEC option bits. */
2867 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2870 /* AND auto FEC bit, and all caps bits. */
2871 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
2872 cfg->link_fec_opt |= pcaps->link_fec_options;
2875 status = ICE_ERR_PARAM;
2879 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
2880 struct ice_link_default_override_tlv tlv;
2882 if (ice_get_link_default_override(&tlv, pi))
2885 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
2886 (tlv.options & ICE_LINK_OVERRIDE_EN))
2887 cfg->link_fec_opt = tlv.fec_options;
2891 ice_free(hw, pcaps);
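/* Illustrative sketch (hypothetical helper): the usual FEC flow is to read
 * the current PHY configuration, copy it into a set-config structure, let
 * ice_cfg_phy_fec() rewrite the FEC option bits, and then apply it.
 * ICE_FEC_RS is assumed to be the RS-FEC member of enum ice_fec_mode.
 */
static enum ice_status example_request_rs_fec(struct ice_port_info *pi)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(pi->hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
				     pcaps, NULL);
	if (!status) {
		ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
		status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
	}
	if (!status)
		status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
	ice_free(pi->hw, pcaps);
	return status;
}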
2897 * ice_get_link_status - get status of the HW network link
2898 * @pi: port information structure
2899 * @link_up: pointer to bool (true/false = linkup/linkdown)
2901 * Variable link_up is true if link is up, false if link is down.
2902 * The variable link_up is invalid if status is non-zero. As a
2903 * result of this call, link status reporting becomes enabled
2905 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2907 struct ice_phy_info *phy_info;
2908 enum ice_status status = ICE_SUCCESS;
2910 if (!pi || !link_up)
2911 return ICE_ERR_PARAM;
2913 phy_info = &pi->phy;
2915 if (phy_info->get_link_info) {
2916 status = ice_update_link_info(pi);
2919 ice_debug(pi->hw, ICE_DBG_LINK,
2920 "get link status error, status = %d\n",
2924 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
2930 * ice_aq_set_link_restart_an
2931 * @pi: pointer to the port information structure
2932 * @ena_link: if true: enable link, if false: disable link
2933 * @cd: pointer to command details structure or NULL
2935 * Sets up the link and restarts the Auto-Negotiation over the link.
2938 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2939 struct ice_sq_cd *cd)
2941 struct ice_aqc_restart_an *cmd;
2942 struct ice_aq_desc desc;
2944 cmd = &desc.params.restart_an;
2946 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2948 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2949 cmd->lport_num = pi->lport;
2951 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2953 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2955 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2959 * ice_aq_set_event_mask
2960 * @hw: pointer to the HW struct
2961 * @port_num: port number of the physical function
2962 * @mask: event mask to be set
2963 * @cd: pointer to command details structure or NULL
2965 * Set event mask (0x0613)
2968 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2969 struct ice_sq_cd *cd)
2971 struct ice_aqc_set_event_mask *cmd;
2972 struct ice_aq_desc desc;
2974 cmd = &desc.params.set_event_mask;
2976 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2978 cmd->lport_num = port_num;
2980 cmd->event_mask = CPU_TO_LE16(mask);
2981 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2985 * ice_aq_set_mac_loopback
2986 * @hw: pointer to the HW struct
2987 * @ena_lpbk: Enable or Disable loopback
2988 * @cd: pointer to command details structure or NULL
2990 * Enable/disable loopback on a given port
2993 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2995 struct ice_aqc_set_mac_lb *cmd;
2996 struct ice_aq_desc desc;
2998 cmd = &desc.params.set_mac_lb;
3000 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3002 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3004 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3008 * ice_aq_set_port_id_led
3009 * @pi: pointer to the port information
3010 * @is_orig_mode: is this LED set to original mode (by the net-list)
3011 * @cd: pointer to command details structure or NULL
3013 * Set LED value for the given port (0x06e9)
3016 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3017 struct ice_sq_cd *cd)
3019 struct ice_aqc_set_port_id_led *cmd;
3020 struct ice_hw *hw = pi->hw;
3021 struct ice_aq_desc desc;
3023 cmd = &desc.params.set_port_id_led;
3025 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3028 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3030 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3032 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3036 * ice_aq_sff_eeprom
3037 * @hw: pointer to the HW struct
3038 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3039 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3040 * @mem_addr: I2C offset; lower 8 bits hold the address, upper 8 bits must be zero.
3042 * @set_page: set or ignore the page
3043 * @data: pointer to data buffer to be read/written to the I2C device.
3044 * @length: 1-16 for read, 1 for write.
3045 * @write: false for a read, true for a write.
3046 * @cd: pointer to command details structure or NULL
3048 * Read/Write SFF EEPROM (0x06EE)
3051 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3052 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3053 bool write, struct ice_sq_cd *cd)
3055 struct ice_aqc_sff_eeprom *cmd;
3056 struct ice_aq_desc desc;
3057 enum ice_status status;
3059 if (!data || (mem_addr & 0xff00))
3060 return ICE_ERR_PARAM;
3062 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3063 cmd = &desc.params.read_write_sff_param;
3064 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
3065 cmd->lport_num = (u8)(lport & 0xff);
3066 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3067 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3068 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3070 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3071 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3072 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3073 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3075 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3077 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
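/* Illustrative sketch (hypothetical helper): read the first 16 identification
 * bytes of an SFP module EEPROM from the conventional 0xA0 address on the
 * default port, without changing the page (a set_page value of 0 is assumed
 * here to mean "leave the page alone").
 */
static enum ice_status example_read_sff_id(struct ice_hw *hw, u8 *data)
{
	return ice_aq_sff_eeprom(hw, 0, 0xA0, 0, 0, 0, data, 16, false, NULL);
}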
3082 * __ice_aq_get_set_rss_lut
3083 * @hw: pointer to the hardware structure
3084 * @vsi_id: VSI FW index
3085 * @lut_type: LUT table type
3086 * @lut: pointer to the LUT buffer provided by the caller
3087 * @lut_size: size of the LUT buffer
3088 * @glob_lut_idx: global LUT index
3089 * @set: set true to set the table, false to get the table
3091 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3093 static enum ice_status
3094 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
3095 u16 lut_size, u8 glob_lut_idx, bool set)
3097 struct ice_aqc_get_set_rss_lut *cmd_resp;
3098 struct ice_aq_desc desc;
3099 enum ice_status status;
3102 cmd_resp = &desc.params.get_set_rss_lut;
3105 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3106 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3108 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3111 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3112 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3113 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3114 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3117 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3118 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3119 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3120 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3121 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3124 status = ICE_ERR_PARAM;
3125 goto ice_aq_get_set_rss_lut_exit;
3128 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3129 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3130 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3133 goto ice_aq_get_set_rss_lut_send;
3134 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3136 goto ice_aq_get_set_rss_lut_send;
3138 goto ice_aq_get_set_rss_lut_send;
3141 /* LUT size is only valid for Global and PF table types */
3143 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3144 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3145 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3146 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3148 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3149 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3150 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3151 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3153 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3154 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3155 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3156 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3157 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3162 status = ICE_ERR_PARAM;
3163 goto ice_aq_get_set_rss_lut_exit;
3166 ice_aq_get_set_rss_lut_send:
3167 cmd_resp->flags = CPU_TO_LE16(flags);
3168 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3170 ice_aq_get_set_rss_lut_exit:
3175 * ice_aq_get_rss_lut
3176 * @hw: pointer to the hardware structure
3177 * @vsi_handle: software VSI handle
3178 * @lut_type: LUT table type
3179 * @lut: pointer to the LUT buffer provided by the caller
3180 * @lut_size: size of the LUT buffer
3182 * get the RSS lookup table, PF or VSI type
3185 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3186 u8 *lut, u16 lut_size)
3188 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3189 return ICE_ERR_PARAM;
3191 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3192 lut_type, lut, lut_size, 0, false);
3196 * ice_aq_set_rss_lut
3197 * @hw: pointer to the hardware structure
3198 * @vsi_handle: software VSI handle
3199 * @lut_type: LUT table type
3200 * @lut: pointer to the LUT buffer provided by the caller
3201 * @lut_size: size of the LUT buffer
3203 * set the RSS lookup table, PF or VSI type
3206 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3207 u8 *lut, u16 lut_size)
3209 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3210 return ICE_ERR_PARAM;
3212 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3213 lut_type, lut, lut_size, 0, true);
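/* Illustrative sketch (hypothetical helper): spread num_qs queues round-robin
 * across a 512-entry PF lookup table and program it. The 512-entry size is
 * one of the values the size switch above accepts for the PF table type.
 */
static enum ice_status
example_program_pf_lut(struct ice_hw *hw, u16 vsi_handle, u16 num_qs)
{
	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
	u16 i;

	if (!num_qs)
		return ICE_ERR_PARAM;
	for (i = 0; i < ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512; i++)
		lut[i] = (u8)(i % num_qs);
	return ice_aq_set_rss_lut(hw, vsi_handle,
				  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				  lut, (u16)sizeof(lut));
}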
3217 * __ice_aq_get_set_rss_key
3218 * @hw: pointer to the HW struct
3219 * @vsi_id: VSI FW index
3220 * @key: pointer to key info struct
3221 * @set: set true to set the key, false to get the key
3223 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3226 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3227 struct ice_aqc_get_set_rss_keys *key,
3230 struct ice_aqc_get_set_rss_key *cmd_resp;
3231 u16 key_size = sizeof(*key);
3232 struct ice_aq_desc desc;
3234 cmd_resp = &desc.params.get_set_rss_key;
3237 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3238 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3240 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3243 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3244 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3245 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3246 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3248 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3252 * ice_aq_get_rss_key
3253 * @hw: pointer to the HW struct
3254 * @vsi_handle: software VSI handle
3255 * @key: pointer to key info struct
3257 * get the RSS key per VSI
3260 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3261 struct ice_aqc_get_set_rss_keys *key)
3263 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3264 return ICE_ERR_PARAM;
3266 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3271 * ice_aq_set_rss_key
3272 * @hw: pointer to the HW struct
3273 * @vsi_handle: software VSI handle
3274 * @keys: pointer to key info struct
3276 * set the RSS key per VSI
3279 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3280 struct ice_aqc_get_set_rss_keys *keys)
3282 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3283 return ICE_ERR_PARAM;
3285 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3290 * ice_aq_add_lan_txq
3291 * @hw: pointer to the hardware structure
3292 * @num_qgrps: Number of added queue groups
3293 * @qg_list: list of queue groups to be added
3294 * @buf_size: size of buffer for indirect command
3295 * @cd: pointer to command details structure or NULL
3297 * Add Tx LAN queue (0x0C30)
3300 * Prior to calling add Tx LAN queue:
3301 * Initialize the following as part of the Tx queue context:
3302 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3303 * Cache profile and Packet shaper profile.
3305 * After add Tx LAN queue AQ command is completed:
3306 * Interrupts should be associated with specific queues,
3307 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
3311 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3312 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3313 struct ice_sq_cd *cd)
3315 u16 i, sum_header_size, sum_q_size = 0;
3316 struct ice_aqc_add_tx_qgrp *list;
3317 struct ice_aqc_add_txqs *cmd;
3318 struct ice_aq_desc desc;
3320 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3322 cmd = &desc.params.add_txqs;
3324 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3326 if (!qg_list)
3327 return ICE_ERR_PARAM;
3329 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3330 return ICE_ERR_PARAM;
3332 sum_header_size = num_qgrps *
3333 (sizeof(*qg_list) - sizeof(*qg_list->txqs));
3336 for (i = 0; i < num_qgrps; i++) {
3337 struct ice_aqc_add_txqs_perq *q = list->txqs;
3339 sum_q_size += list->num_txqs * sizeof(*q);
3340 list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
3343 if (buf_size != (sum_header_size + sum_q_size))
3344 return ICE_ERR_PARAM;
3346 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3348 cmd->num_qgrps = num_qgrps;
3350 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
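/* Illustrative sketch (hypothetical helper): the buf_size check above is
 * exact, so a caller computes it as one group header (the struct minus its
 * flexible txqs slot) plus one per-queue entry for each queue in the group.
 */
static u16 example_add_txq_buf_size(struct ice_aqc_add_tx_qgrp *qg)
{
	return (u16)(sizeof(*qg) - sizeof(*qg->txqs) +
		     qg->num_txqs * sizeof(*qg->txqs));
}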
3354 * ice_aq_dis_lan_txq
3355 * @hw: pointer to the hardware structure
3356 * @num_qgrps: number of groups in the list
3357 * @qg_list: the list of groups to disable
3358 * @buf_size: the total size of the qg_list buffer in bytes
3359 * @rst_src: if called due to reset, specifies the reset source
3360 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3361 * @cd: pointer to command details structure or NULL
3363 * Disable LAN Tx queue (0x0C31)
3365 static enum ice_status
3366 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3367 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3368 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3369 struct ice_sq_cd *cd)
3371 struct ice_aqc_dis_txqs *cmd;
3372 struct ice_aq_desc desc;
3373 enum ice_status status;
3376 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3377 cmd = &desc.params.dis_txqs;
3378 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3380 /* qg_list can be NULL only in VM/VF reset flow */
3381 if (!qg_list && !rst_src)
3382 return ICE_ERR_PARAM;
3384 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3385 return ICE_ERR_PARAM;
3387 cmd->num_entries = num_qgrps;
3389 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3390 ICE_AQC_Q_DIS_TIMEOUT_M);
3394 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3395 cmd->vmvf_and_timeout |=
3396 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3403 /* flush pipe on time out */
3404 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3405 /* If no queue group info, we are in a reset flow. Issue the AQ */
3409 /* set RD bit to indicate that command buffer is provided by the driver
3410 * and it needs to be read by the firmware
3412 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3414 for (i = 0; i < num_qgrps; ++i) {
3415 /* Calculate the size taken up by the queue IDs in this group */
3416 sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
3418 /* Add the size of the group header */
3419 sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
3421 /* If the num of queues is even, add 2 bytes of padding */
3422 if ((qg_list[i].num_qs % 2) == 0)
3423 sz += 2;
3424 }
3426 if (buf_size != sz)
3427 return ICE_ERR_PARAM;
3430 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3433 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3434 vmvf_num, hw->adminq.sq_last_status);
3436 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3437 LE16_TO_CPU(qg_list[0].q_id[0]),
3438 hw->adminq.sq_last_status);
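/* Illustrative sketch (hypothetical helper): size one disable-queue group the
 * same way the loop above does, including the 2-byte pad that keeps an entry
 * with an even queue count 4-byte aligned.
 */
static u16 example_dis_txq_buf_size(struct ice_aqc_dis_txq_item *qg)
{
	u16 sz = (u16)(sizeof(*qg) - sizeof(qg->q_id) +
		       qg->num_qs * sizeof(qg->q_id));

	if ((qg->num_qs % 2) == 0)
		sz += 2;
	return sz;
}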
3444 * ice_aq_move_recfg_lan_txq
3445 * @hw: pointer to the hardware structure
3446 * @num_qs: number of queues to move/reconfigure
3447 * @is_move: true if this operation involves node movement
3448 * @is_tc_change: true if this operation involves a TC change
3449 * @subseq_call: true if this operation is a subsequent call
3450 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3451 * @timeout: timeout in units of 100 usec (valid values 0-50)
3452 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3453 * @buf: struct containing src/dest TEID and per-queue info
3454 * @buf_size: size of buffer for indirect command
3455 * @txqs_moved: out param, number of queues successfully moved
3456 * @cd: pointer to command details structure or NULL
3458 * Move / Reconfigure Tx LAN queues (0x0C32)
3461 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3462 bool is_tc_change, bool subseq_call, bool flush_pipe,
3463 u8 timeout, u32 *blocked_cgds,
3464 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3465 u8 *txqs_moved, struct ice_sq_cd *cd)
3467 struct ice_aqc_move_txqs *cmd;
3468 struct ice_aq_desc desc;
3469 enum ice_status status;
3471 cmd = &desc.params.move_txqs;
3472 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3474 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3475 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3476 return ICE_ERR_PARAM;
3478 if (is_tc_change && !flush_pipe && !blocked_cgds)
3479 return ICE_ERR_PARAM;
3481 if (!is_move && !is_tc_change)
3482 return ICE_ERR_PARAM;
3484 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3487 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3490 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3493 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3496 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3498 cmd->num_qs = num_qs;
3499 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3500 ICE_AQC_Q_CMD_TIMEOUT_M);
3502 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3504 if (!status && txqs_moved)
3505 *txqs_moved = cmd->num_qs;
3507 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3508 is_tc_change && !flush_pipe)
3509 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3514 /* End of FW Admin Queue command wrappers */
3517 * ice_write_byte - write a byte to a packed context structure
3518 * @src_ctx: the context structure to read from
3519 * @dest_ctx: the context to be written to
3520 * @ce_info: a description of the struct to be filled
3523 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3525 u8 src_byte, dest_byte, mask;
3529 /* copy from the next struct field */
3530 from = src_ctx + ce_info->offset;
3532 /* prepare the bits and mask */
3533 shift_width = ce_info->lsb % 8;
3534 mask = (u8)(BIT(ce_info->width) - 1);
3539 /* shift to correct alignment */
3540 mask <<= shift_width;
3541 src_byte <<= shift_width;
3543 /* get the current bits from the target bit string */
3544 dest = dest_ctx + (ce_info->lsb / 8);
3546 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3548 dest_byte &= ~mask; /* get the bits not changing */
3549 dest_byte |= src_byte; /* add in the new bits */
3551 /* put it all back */
3552 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3556 * ice_write_word - write a word to a packed context structure
3557 * @src_ctx: the context structure to read from
3558 * @dest_ctx: the context to be written to
3559 * @ce_info: a description of the struct to be filled
3562 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3569 /* copy from the next struct field */
3570 from = src_ctx + ce_info->offset;
3572 /* prepare the bits and mask */
3573 shift_width = ce_info->lsb % 8;
3574 mask = BIT(ce_info->width) - 1;
3576 /* don't swizzle the bits until after the mask because the mask bits
3577 * will be in a different bit position on big endian machines
3579 src_word = *(u16 *)from;
3582 /* shift to correct alignment */
3583 mask <<= shift_width;
3584 src_word <<= shift_width;
3586 /* get the current bits from the target bit string */
3587 dest = dest_ctx + (ce_info->lsb / 8);
3589 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3591 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
3592 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
3594 /* put it all back */
3595 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3599 * ice_write_dword - write a dword to a packed context structure
3600 * @src_ctx: the context structure to read from
3601 * @dest_ctx: the context to be written to
3602 * @ce_info: a description of the struct to be filled
3605 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3607 u32 src_dword, mask;
3612 /* copy from the next struct field */
3613 from = src_ctx + ce_info->offset;
3615 /* prepare the bits and mask */
3616 shift_width = ce_info->lsb % 8;
3618 /* if the field width is exactly 32 on an x86 machine, then the shift
3619 * operation will not work because the SHL instruction's count is masked
3620 * to 5 bits so the shift will do nothing
3622 if (ce_info->width < 32)
3623 mask = BIT(ce_info->width) - 1;
3627 /* don't swizzle the bits until after the mask because the mask bits
3628 * will be in a different bit position on big endian machines
3630 src_dword = *(u32 *)from;
3633 /* shift to correct alignment */
3634 mask <<= shift_width;
3635 src_dword <<= shift_width;
3637 /* get the current bits from the target bit string */
3638 dest = dest_ctx + (ce_info->lsb / 8);
3640 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3642 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
3643 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
3645 /* put it all back */
3646 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3650 * ice_write_qword - write a qword to a packed context structure
3651 * @src_ctx: the context structure to read from
3652 * @dest_ctx: the context to be written to
3653 * @ce_info: a description of the struct to be filled
3656 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3658 u64 src_qword, mask;
3663 /* copy from the next struct field */
3664 from = src_ctx + ce_info->offset;
3666 /* prepare the bits and mask */
3667 shift_width = ce_info->lsb % 8;
3669 /* if the field width is exactly 64 on an x86 machine, then the shift
3670 * operation will not work because the SHL instruction's count is masked
3671 * to 6 bits so the shift will do nothing
3673 if (ce_info->width < 64)
3674 mask = BIT_ULL(ce_info->width) - 1;
3678 /* don't swizzle the bits until after the mask because the mask bits
3679 * will be in a different bit position on big endian machines
3681 src_qword = *(u64 *)from;
3684 /* shift to correct alignment */
3685 mask <<= shift_width;
3686 src_qword <<= shift_width;
3688 /* get the current bits from the target bit string */
3689 dest = dest_ctx + (ce_info->lsb / 8);
3691 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
3693 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
3694 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
3696 /* put it all back */
3697 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3701 * ice_set_ctx - set context bits in packed structure
3702 * @hw: pointer to the hardware structure
3703 * @src_ctx: pointer to a generic non-packed context structure
3704 * @dest_ctx: pointer to memory for the packed structure
3705 * @ce_info: a description of the structure to be transformed
3708 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
3709 const struct ice_ctx_ele *ce_info)
3713 for (f = 0; ce_info[f].width; f++) {
3714 /* We have to deal with each element of the FW response
3715 * using the correct size so that we are correct regardless
3716 * of the endianness of the machine.
3718 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
3719 ice_debug(hw, ICE_DBG_QCTX,
3720 "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
3721 f, ce_info[f].width, ce_info[f].size_of);
3724 switch (ce_info[f].size_of) {
3726 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3729 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3732 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3735 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3738 return ICE_ERR_INVAL_SIZE;
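/* Illustrative sketch (hypothetical struct and table, using offsetof from
 * <stddef.h>): each ice_ctx_ele entry names a field's offset and size in the
 * unpacked struct plus the lsb/width it occupies in the packed image; a
 * zero-width entry terminates the walk in ice_set_ctx().
 */
struct example_ctx {
	u16 head;
	u8 ena;
};

static const struct ice_ctx_ele example_ctx_info[] = {
	{ .offset = offsetof(struct example_ctx, head),
	  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
	{ .offset = offsetof(struct example_ctx, ena),
	  .size_of = sizeof(u8), .width = 1, .lsb = 16 },
	{ .width = 0 }
};

static enum ice_status
example_pack(struct ice_hw *hw, struct example_ctx *src, u8 *dest)
{
	/* head lands in bits 0-12 of dest, ena in bit 16 */
	return ice_set_ctx(hw, (u8 *)src, dest, example_ctx_info);
}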
3746 * ice_read_byte - read context byte into struct
3747 * @src_ctx: the context structure to read from
3748 * @dest_ctx: the context to be written to
3749 * @ce_info: a description of the struct to be filled
3752 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3758 /* prepare the bits and mask */
3759 shift_width = ce_info->lsb % 8;
3760 mask = (u8)(BIT(ce_info->width) - 1);
3762 /* shift to correct alignment */
3763 mask <<= shift_width;
3765 /* get the current bits from the src bit string */
3766 src = src_ctx + (ce_info->lsb / 8);
3768 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3770 dest_byte &= ~(mask);
3772 dest_byte >>= shift_width;
3774 /* get the address from the struct field */
3775 target = dest_ctx + ce_info->offset;
3777 /* put it back in the struct */
3778 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3782 * ice_read_word - read context word into struct
3783 * @src_ctx: the context structure to read from
3784 * @dest_ctx: the context to be written to
3785 * @ce_info: a description of the struct to be filled
3788 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3790 u16 dest_word, mask;
3795 /* prepare the bits and mask */
3796 shift_width = ce_info->lsb % 8;
3797 mask = BIT(ce_info->width) - 1;
3799 /* shift to correct alignment */
3800 mask <<= shift_width;
3802 /* get the current bits from the src bit string */
3803 src = src_ctx + (ce_info->lsb / 8);
3805 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
3807 /* the data in the memory is stored as little endian so mask it
3808 * correctly
3810 src_word &= ~(CPU_TO_LE16(mask));
3812 /* get the data back into host order before shifting */
3813 dest_word = LE16_TO_CPU(src_word);
3815 dest_word >>= shift_width;
3817 /* get the address from the struct field */
3818 target = dest_ctx + ce_info->offset;
3820 /* put it back in the struct */
3821 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3825 * ice_read_dword - read context dword into struct
3826 * @src_ctx: the context structure to read from
3827 * @dest_ctx: the context to be written to
3828 * @ce_info: a description of the struct to be filled
3831 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3833 u32 dest_dword, mask;
3838 /* prepare the bits and mask */
3839 shift_width = ce_info->lsb % 8;
3841 /* if the field width is exactly 32 on an x86 machine, then the shift
3842 * operation will not work because the SHL instruction's count is masked
3843 * to 5 bits so the shift will do nothing
3845 if (ce_info->width < 32)
3846 mask = BIT(ce_info->width) - 1;
3850 /* shift to correct alignment */
3851 mask <<= shift_width;
3853 /* get the current bits from the src bit string */
3854 src = src_ctx + (ce_info->lsb / 8);
3856 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
3858 /* the data in the memory is stored as little endian so mask it
3859 * correctly
3861 src_dword &= ~(CPU_TO_LE32(mask));
3863 /* get the data back into host order before shifting */
3864 dest_dword = LE32_TO_CPU(src_dword);
3866 dest_dword >>= shift_width;
3868 /* get the address from the struct field */
3869 target = dest_ctx + ce_info->offset;
3871 /* put it back in the struct */
3872 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3876 * ice_read_qword - read context qword into struct
3877 * @src_ctx: the context structure to read from
3878 * @dest_ctx: the context to be written to
3879 * @ce_info: a description of the struct to be filled
3882 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3884 u64 dest_qword, mask;
3889 /* prepare the bits and mask */
3890 shift_width = ce_info->lsb % 8;
3892 /* if the field width is exactly 64 on an x86 machine, then the shift
3893 * operation will not work because the SHL instruction's count is masked
3894 * to 6 bits so the shift will do nothing
3896 if (ce_info->width < 64)
3897 mask = BIT_ULL(ce_info->width) - 1;
3901 /* shift to correct alignment */
3902 mask <<= shift_width;
3904 /* get the current bits from the src bit string */
3905 src = src_ctx + (ce_info->lsb / 8);
3907 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
3909 /* the data in the memory is stored as little endian so mask it
3910 * correctly
3912 src_qword &= ~(CPU_TO_LE64(mask));
3914 /* get the data back into host order before shifting */
3915 dest_qword = LE64_TO_CPU(src_qword);
3917 dest_qword >>= shift_width;
3919 /* get the address from the struct field */
3920 target = dest_ctx + ce_info->offset;
3922 /* put it back in the struct */
3923 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
3927 * ice_get_ctx - extract context bits from a packed structure
3928 * @src_ctx: pointer to a generic packed context structure
3929 * @dest_ctx: pointer to a generic non-packed context structure
3930 * @ce_info: a description of the structure to be read from
3933 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
3937 for (f = 0; ce_info[f].width; f++) {
3938 switch (ce_info[f].size_of) {
3940 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
3943 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
3946 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
3949 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
3952 /* nothing to do, just keep going */
3961 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
3962 * @hw: pointer to the HW struct
3963 * @vsi_handle: software VSI handle
3964 * @tc: TC number
3965 * @q_handle: software queue handle
3968 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3970 struct ice_vsi_ctx *vsi;
3971 struct ice_q_ctx *q_ctx;
3973 vsi = ice_get_vsi_ctx(hw, vsi_handle);
3974 if (!vsi)
3975 return NULL;
3976 if (q_handle >= vsi->num_lan_q_entries[tc])
3977 return NULL;
3978 if (!vsi->lan_q_ctx[tc])
3979 return NULL;
3980 q_ctx = vsi->lan_q_ctx[tc];
3981 return &q_ctx[q_handle];
3985 * ice_ena_vsi_txq
3986 * @pi: port information structure
3987 * @vsi_handle: software VSI handle
3988 * @tc: TC number
3989 * @q_handle: software queue handle
3990 * @num_qgrps: Number of added queue groups
3991 * @buf: list of queue groups to be added
3992 * @buf_size: size of buffer for indirect command
3993 * @cd: pointer to command details structure or NULL
3995 * This function adds one LAN queue
3998 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
3999 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4000 struct ice_sq_cd *cd)
4002 struct ice_aqc_txsched_elem_data node = { 0 };
4003 struct ice_sched_node *parent;
4004 struct ice_q_ctx *q_ctx;
4005 enum ice_status status;
4008 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4011 if (num_qgrps > 1 || buf->num_txqs > 1)
4012 return ICE_ERR_MAX_LIMIT;
4016 if (!ice_is_vsi_valid(hw, vsi_handle))
4017 return ICE_ERR_PARAM;
4019 ice_acquire_lock(&pi->sched_lock);
4021 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4023 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4025 status = ICE_ERR_PARAM;
4029 /* find a parent node */
4030 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4031 ICE_SCHED_NODE_OWNER_LAN);
4033 status = ICE_ERR_PARAM;
4037 buf->parent_teid = parent->info.node_teid;
4038 node.parent_teid = parent->info.node_teid;
4039 /* Mark that the values in the "generic" section as valid. The default
4040 * value in the "generic" section is zero. This means that :
4041 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4042 * - 0 priority among siblings, indicated by Bit 1-3.
4043 * - WFQ, indicated by Bit 4.
4044 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4046 * - Bit 7 is reserved.
4047 * Without setting the generic section as valid in valid_sections, the
4048 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4050 buf->txqs[0].info.valid_sections =
4051 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4052 ICE_AQC_ELEM_VALID_EIR;
4053 buf->txqs[0].info.generic = 0;
4054 buf->txqs[0].info.cir_bw.bw_profile_idx =
4055 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4056 buf->txqs[0].info.cir_bw.bw_alloc =
4057 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4058 buf->txqs[0].info.eir_bw.bw_profile_idx =
4059 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4060 buf->txqs[0].info.eir_bw.bw_alloc =
4061 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4063 /* add the LAN queue */
4064 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4065 if (status != ICE_SUCCESS) {
4066 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4067 LE16_TO_CPU(buf->txqs[0].txq_id),
4068 hw->adminq.sq_last_status);
4072 node.node_teid = buf->txqs[0].q_teid;
4073 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4074 q_ctx->q_handle = q_handle;
4075 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4077 /* add a leaf node into scheduler tree queue layer */
4078 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4080 status = ice_sched_replay_q_bw(pi, q_ctx);
4083 ice_release_lock(&pi->sched_lock);
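
/* Editorial sketch (illustrative caller, not part of this file): queues
 * are enabled one at a time, so a caller builds a single-entry queue
 * group. The packed TLAN context bytes in txqs[0].txq_ctx, which real
 * callers fill before this call, are omitted here for brevity:
 *
 *	struct ice_aqc_add_tx_qgrp qg = { 0 };
 *
 *	qg.num_txqs = 1;
 *	qg.txqs[0].txq_id = CPU_TO_LE16(txq_id);
 *	status = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, &qg,
 *				 sizeof(qg), NULL);
 */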

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	struct ice_q_ctx *q_ctx;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!num_queues) {
		/* if the queue is already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * then call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), rst_src, vmvf_num,
					    cd);

		if (status != ICE_SUCCESS)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = ICE_SUCCESS;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}
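
/* Editorial example (hypothetical values): with tc_bitmap = 0x3 and
 * maxqs = { 16, 4, 0, ... }, TC0 is configured for up to 16 queues and
 * TC1 for up to 4, while the remaining TCs are disabled for this VSI
 * because ice_is_tc_ena() returns false for their bits.
 */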

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_is_main_vsi - checks whether the VSI is the main VSI
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * Checks whether the VSI is the main VSI (the first PF VSI created on
 * this PF).
 */
static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function initializes filters
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
{
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_sw_replay_rule_info(hw, sw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return ice_sched_replay_tc_node_bw(hw->port_info);
}
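
/* Editorial note (an assumption about the OS-dependent helper):
 * LIST_REPLACE_INIT is taken here to transplant the contents of
 * filt_rules onto filt_replay_rules and reinitialize filt_rules to
 * empty, so the replay path walks the saved copy while rebuilding the
 * operational list from scratch.
 */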

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_port_info *pi = hw->port_info;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (ice_is_main_vsi(hw, vsi_handle)) {
		status = ice_replay_pre_init(hw, sw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40-bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of the 64-bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
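
/* Editorial example (illustrative arithmetic): suppose *prev_stat is
 * 0xFFFFFFFFF0 and the 40-bit hardware counter wraps, so the next read
 * returns new_data = 0x10. Since new_data < *prev_stat, the roll-over
 * branch computes (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20, i.e. the
 * 32 events that actually occurred across the wrap.
 */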

/**
 * ice_stat_update32 - read 32-bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of the HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
 * @hw: ptr to the hardware info
 * @vsi_handle: VSI handle
 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
 * @cur_stats: ptr to current stats structure
 *
 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
 * thus cannot be read using the normal ice_stat_update32 function.
 *
 * Read the GLV_REPC register associated with the given VSI, and update the
 * rx_no_desc and rx_error values in the ice_eth_stats structure.
 *
 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
 * cleared each time it's read.
 *
 * Note that the GLV_RDPC register also counts the causes that would trigger
 * GLV_REPC. However, it does not give the finer grained detail about why the
 * packets are being dropped. The GLV_REPC values can be used to distinguish
 * whether Rx packets are dropped due to errors or due to no available
 * descriptors.
 */
void
ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
		     struct ice_eth_stats *cur_stats)
{
	u16 vsi_num, no_desc, error_cnt;
	u32 repc;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return;

	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	/* If we haven't loaded stats yet, just clear the current value */
	if (!prev_stat_loaded) {
		wr32(hw, GLV_REPC(vsi_num), 0);
		return;
	}

	repc = rd32(hw, GLV_REPC(vsi_num));
	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;

	/* Clear the count by writing to the stats register */
	wr32(hw, GLV_REPC(vsi_num), 0);

	cur_stats->rx_no_desc += no_desc;
	cur_stats->rx_errors += error_cnt;
}
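
/* Editorial example (hypothetical field layout, for illustration only):
 * if the NO_DESC counter occupied bits 15:0 and the ERROR counter bits
 * 31:16, a read of repc = 0x00020005 would decode to no_desc = 5 and
 * error_cnt = 2; the actual positions come from the GLV_REPC_*_S/_M
 * definitions.
 */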

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to hold the element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
	buf->generic[0].node_teid = CPU_TO_LE32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status != ICE_SUCCESS || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_get_fw_mode - returns FW mode
 * @hw: pointer to the HW struct
 */
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
{
#define ICE_FW_MODE_DBG_M BIT(0)
#define ICE_FW_MODE_REC_M BIT(1)
#define ICE_FW_MODE_ROLLBACK_M BIT(2)
	u32 fw_mode;

	/* check the current FW mode */
	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;

	if (fw_mode & ICE_FW_MODE_DBG_M)
		return ICE_FW_MODE_DBG;
	else if (fw_mode & ICE_FW_MODE_REC_M)
		return ICE_FW_MODE_REC;
	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
		return ICE_FW_MODE_ROLLBACK;
	else
		return ICE_FW_MODE_NORMAL;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	/* Currently, only supported for E810 devices */
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}
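
/* Editorial example (hypothetical version numbers): if the required API
 * triple ICE_FW_API_LINK_OVERRIDE_{MAJ,MIN,PATCH} were 1.5.2, firmware
 * reporting 2.0.0 or 1.6.0 passes, 1.5.2 passes on the patch check, and
 * 1.5.1 or 1.4.9 fails.
 */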

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
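
/* Editorial note (layout inferred from the offsets used above): each
 * port's override block in the PFA is a run of 16-bit words starting at
 * tlv_start: an options word (options plus phy_config bits), an FEC
 * options word, then the phy_type_low words followed by the
 * phy_type_high words, with each group assembled least-significant-word
 * first into a 64-bit PHY type mask.
 */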

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer to store the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = CPU_TO_LE16(buf_size);

	cmd->type = mib_type;
	cmd->length = CPU_TO_LE16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
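
/* Editorial note: ICE_AQ_FLAG_RD marks the attached buffer as data for
 * the firmware to read (here, the MIB block being set), which is why it
 * is OR'd into desc.flags along with datalen before the command is sent.
 */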