1 /*******************************************************************************
3 Copyright (c) 2001-2014, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "ixgbe_x550.h"
35 #include "ixgbe_x540.h"
36 #include "ixgbe_type.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
42 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
43 * @hw: pointer to hardware structure
45 * Initialize the function pointers and assign the MAC type for X550.
46 * Does not touch the hardware.
48 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
50 struct ixgbe_mac_info *mac = &hw->mac;
51 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
54 DEBUGFUNC("ixgbe_init_ops_X550");
56 ret_val = ixgbe_init_ops_X540(hw);
57 mac->ops.dmac_config = ixgbe_dmac_config_X550;
58 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
59 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
60 mac->ops.setup_eee = ixgbe_setup_eee_X550;
61 mac->ops.set_source_address_pruning =
62 ixgbe_set_source_address_pruning_X550;
63 mac->ops.set_ethertype_anti_spoofing =
64 ixgbe_set_ethertype_anti_spoofing_X550;
66 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
67 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
68 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
69 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
70 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
71 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
72 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
73 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
74 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
76 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
77 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
78 mac->ops.mdd_event = ixgbe_mdd_event_X550;
79 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
80 mac->ops.disable_rx = ixgbe_disable_rx_x550;
85 * ixgbe_identify_phy_x550em - Get PHY type based on device id
86 * @hw: pointer to hardware structure
90 STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
92 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
94 switch (hw->device_id) {
95 case IXGBE_DEV_ID_X550EM_X_SFP:
96 /* set up for CS4227 usage */
97 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
100 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
101 esdp |= IXGBE_ESDP_SDP1_DIR;
103 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
104 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
106 return ixgbe_identify_module_generic(hw);
108 case IXGBE_DEV_ID_X550EM_X_KX4:
109 hw->phy.type = ixgbe_phy_x550em_kx4;
111 case IXGBE_DEV_ID_X550EM_X_KR:
112 hw->phy.type = ixgbe_phy_x550em_kr;
114 case IXGBE_DEV_ID_X550EM_X_1G_T:
115 case IXGBE_DEV_ID_X550EM_X_10G_T:
116 return ixgbe_identify_phy_generic(hw);
120 return IXGBE_SUCCESS;
123 STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
124 u32 device_type, u16 *phy_data)
126 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
127 return IXGBE_NOT_IMPLEMENTED;
130 STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
131 u32 device_type, u16 phy_data)
133 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
134 return IXGBE_NOT_IMPLEMENTED;
138 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
139 * @hw: pointer to hardware structure
141 * Initialize the function pointers and for MAC type X550EM.
142 * Does not touch the hardware.
144 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
146 struct ixgbe_mac_info *mac = &hw->mac;
147 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
148 struct ixgbe_phy_info *phy = &hw->phy;
151 DEBUGFUNC("ixgbe_init_ops_X550EM");
153 /* Similar to X550 so start there. */
154 ret_val = ixgbe_init_ops_X550(hw);
156 /* Since this function eventually calls
157 * ixgbe_init_ops_540 by design, we are setting
158 * the pointers to NULL explicitly here to overwrite
159 * the values being set in the x540 function.
161 /* Thermal sensor not supported in x550EM */
162 mac->ops.get_thermal_sensor_data = NULL;
163 mac->ops.init_thermal_sensor_thresh = NULL;
164 mac->thermal_sensor_enabled = false;
166 /* FCOE not supported in x550EM */
167 mac->ops.get_san_mac_addr = NULL;
168 mac->ops.set_san_mac_addr = NULL;
169 mac->ops.get_wwn_prefix = NULL;
170 mac->ops.get_fcoe_boot_status = NULL;
172 /* IPsec not supported in x550EM */
173 mac->ops.disable_sec_rx_path = NULL;
174 mac->ops.enable_sec_rx_path = NULL;
176 /* X550EM bus type is internal*/
177 hw->bus.type = ixgbe_bus_type_internal;
178 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
180 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
181 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
182 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
183 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
184 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
185 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
186 mac->ops.get_supported_physical_layer =
187 ixgbe_get_supported_physical_layer_X550em;
190 phy->ops.init = ixgbe_init_phy_ops_X550em;
191 phy->ops.identify = ixgbe_identify_phy_x550em;
192 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
193 phy->ops.set_phy_power = NULL;
197 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
198 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
199 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
200 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
201 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
202 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
203 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
204 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
210 * ixgbe_dmac_config_X550
211 * @hw: pointer to hardware structure
213 * Configure DMA coalescing. If enabling dmac, dmac is activated.
214 * When disabling dmac, dmac enable dmac bit is cleared.
216 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
218 u32 reg, high_pri_tc;
220 DEBUGFUNC("ixgbe_dmac_config_X550");
222 /* Disable DMA coalescing before configuring */
223 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
224 reg &= ~IXGBE_DMACR_DMAC_EN;
225 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
227 /* Disable DMA Coalescing if the watchdog timer is 0 */
228 if (!hw->mac.dmac_config.watchdog_timer)
231 ixgbe_dmac_config_tcs_X550(hw);
233 /* Configure DMA Coalescing Control Register */
234 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
236 /* Set the watchdog timer in units of 40.96 usec */
237 reg &= ~IXGBE_DMACR_DMACWT_MASK;
238 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
240 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
241 /* If fcoe is enabled, set high priority traffic class */
242 if (hw->mac.dmac_config.fcoe_en) {
243 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
244 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
245 IXGBE_DMACR_HIGH_PRI_TC_MASK);
247 reg |= IXGBE_DMACR_EN_MNG_IND;
249 /* Enable DMA coalescing after configuration */
250 reg |= IXGBE_DMACR_DMAC_EN;
251 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
254 return IXGBE_SUCCESS;
258 * ixgbe_dmac_config_tcs_X550
259 * @hw: pointer to hardware structure
261 * Configure DMA coalescing threshold per TC. The dmac enable bit must
262 * be cleared before configuring.
264 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
266 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
268 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
270 /* Configure DMA coalescing enabled */
271 switch (hw->mac.dmac_config.link_speed) {
272 case IXGBE_LINK_SPEED_100_FULL:
273 pb_headroom = IXGBE_DMACRXT_100M;
275 case IXGBE_LINK_SPEED_1GB_FULL:
276 pb_headroom = IXGBE_DMACRXT_1G;
279 pb_headroom = IXGBE_DMACRXT_10G;
283 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
284 IXGBE_MHADD_MFS_SHIFT) / 1024);
286 /* Set the per Rx packet buffer receive threshold */
287 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
288 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
289 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
291 if (tc < hw->mac.dmac_config.num_tcs) {
293 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
294 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
295 IXGBE_RXPBSIZE_SHIFT;
297 /* Calculate receive buffer threshold in kilobytes */
298 if (rx_pb_size > pb_headroom)
299 rx_pb_size = rx_pb_size - pb_headroom;
303 /* Minimum of MFS shall be set for DMCTH */
304 reg |= (rx_pb_size > maxframe_size_kb) ?
305 rx_pb_size : maxframe_size_kb;
307 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
309 return IXGBE_SUCCESS;
313 * ixgbe_dmac_update_tcs_X550
314 * @hw: pointer to hardware structure
316 * Disables dmac, updates per TC settings, and then enables dmac.
318 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
322 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
324 /* Disable DMA coalescing before configuring */
325 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
326 reg &= ~IXGBE_DMACR_DMAC_EN;
327 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
329 ixgbe_dmac_config_tcs_X550(hw);
331 /* Enable DMA coalescing after configuration */
332 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
333 reg |= IXGBE_DMACR_DMAC_EN;
334 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
336 return IXGBE_SUCCESS;
340 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
341 * @hw: pointer to hardware structure
343 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
344 * ixgbe_hw struct in order to set up EEPROM access.
346 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
348 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
352 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
354 if (eeprom->type == ixgbe_eeprom_uninitialized) {
355 eeprom->semaphore_delay = 10;
356 eeprom->type = ixgbe_flash;
358 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
359 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
360 IXGBE_EEC_SIZE_SHIFT);
361 eeprom->word_size = 1 << (eeprom_size +
362 IXGBE_EEPROM_WORD_SIZE_SHIFT);
364 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
365 eeprom->type, eeprom->word_size);
368 return IXGBE_SUCCESS;
372 * ixgbe_setup_eee_X550 - Enable/disable EEE support
373 * @hw: pointer to the HW structure
374 * @enable_eee: boolean flag to enable EEE
376 * Enable/disable EEE based on enable_eee flag.
377 * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
381 s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
388 DEBUGFUNC("ixgbe_setup_eee_X550");
390 eeer = IXGBE_READ_REG(hw, IXGBE_EEER);
391 /* Enable or disable EEE per flag */
393 eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
395 if (hw->device_id == IXGBE_DEV_ID_X550T) {
396 /* Advertise EEE capability */
397 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
398 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
400 autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
401 IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
402 IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
404 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
405 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
406 } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
407 status = ixgbe_read_iosf_sb_reg_x550(hw,
408 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
409 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
410 if (status != IXGBE_SUCCESS)
413 link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
414 IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
416 status = ixgbe_write_iosf_sb_reg_x550(hw,
417 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
418 IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
419 if (status != IXGBE_SUCCESS)
423 eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
425 if (hw->device_id == IXGBE_DEV_ID_X550T) {
426 /* Disable advertised EEE capability */
427 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
428 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
430 autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
431 IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
432 IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
434 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
435 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
436 } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
437 status = ixgbe_read_iosf_sb_reg_x550(hw,
438 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
439 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
440 if (status != IXGBE_SUCCESS)
443 link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
444 IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
446 status = ixgbe_write_iosf_sb_reg_x550(hw,
447 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
448 IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
449 if (status != IXGBE_SUCCESS)
453 IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
455 return IXGBE_SUCCESS;
459 * ixgbe_set_source_address_pruning_X550 - Enable/Disbale source address pruning
460 * @hw: pointer to hardware structure
461 * @enable: enable or disable source address pruning
462 * @pool: Rx pool to set source address pruning for
464 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
469 /* max rx pool is 63 */
473 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
474 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
477 pfflp |= (1ULL << pool);
479 pfflp &= ~(1ULL << pool);
481 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
482 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
486 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
487 * @hw: pointer to hardware structure
488 * @enable: enable or disable switch for Ethertype anti-spoofing
489 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
492 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
495 int vf_target_reg = vf >> 3;
496 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
499 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
501 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
503 pfvfspoof |= (1 << vf_target_shift);
505 pfvfspoof &= ~(1 << vf_target_shift);
507 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
511 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF
513 * @hw: pointer to hardware structure
514 * @reg_addr: 32 bit PHY register to write
515 * @device_type: 3 bit device type
516 * @data: Data to write to the register
518 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
519 u32 device_type, u32 data)
521 u32 i, command, error;
523 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
524 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
526 /* Write IOSF control register */
527 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
529 /* Write IOSF data register */
530 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
532 * Check every 10 usec to see if the address cycle completed.
533 * The SB IOSF BUSY bit will clear when the operation is
536 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
539 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
540 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
544 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
545 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
546 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
547 ERROR_REPORT2(IXGBE_ERROR_POLLING,
548 "Failed to write, error %x\n", error);
549 return IXGBE_ERR_PHY;
552 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
553 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Write timed out\n");
554 return IXGBE_ERR_PHY;
557 return IXGBE_SUCCESS;
561 * ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF
563 * @hw: pointer to hardware structure
564 * @reg_addr: 32 bit PHY register to write
565 * @device_type: 3 bit device type
566 * @phy_data: Pointer to read data from the register
568 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
569 u32 device_type, u32 *data)
571 u32 i, command, error;
573 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
574 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
576 /* Write IOSF control register */
577 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
580 * Check every 10 usec to see if the address cycle completed.
581 * The SB IOSF BUSY bit will clear when the operation is
584 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
587 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
588 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
592 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
593 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
594 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
595 ERROR_REPORT2(IXGBE_ERROR_POLLING,
596 "Failed to read, error %x\n", error);
597 return IXGBE_ERR_PHY;
600 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
601 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Read timed out\n");
602 return IXGBE_ERR_PHY;
605 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
607 return IXGBE_SUCCESS;
611 * ixgbe_disable_mdd_X550
612 * @hw: pointer to hardware structure
614 * Disable malicious driver detection
616 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
620 DEBUGFUNC("ixgbe_disable_mdd_X550");
622 /* Disable MDD for TX DMA and interrupt */
623 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
624 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
625 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
627 /* Disable MDD for RX and interrupt */
628 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
629 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
630 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
634 * ixgbe_enable_mdd_X550
635 * @hw: pointer to hardware structure
637 * Enable malicious driver detection
639 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
643 DEBUGFUNC("ixgbe_enable_mdd_X550");
645 /* Enable MDD for TX DMA and interrupt */
646 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
647 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
648 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
650 /* Enable MDD for RX and interrupt */
651 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
652 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
653 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
657 * ixgbe_restore_mdd_vf_X550
658 * @hw: pointer to hardware structure
661 * Restore VF that was disabled during malicious driver detection event
663 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
665 u32 idx, reg, num_qs, start_q, bitmask;
667 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
669 /* Map VF to queues */
670 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
671 switch (reg & IXGBE_MRQC_MRQE_MASK) {
672 case IXGBE_MRQC_VMDQRT8TCEN:
673 num_qs = 8; /* 16 VFs / pools */
674 bitmask = 0x000000FF;
676 case IXGBE_MRQC_VMDQRSS32EN:
677 case IXGBE_MRQC_VMDQRT4TCEN:
678 num_qs = 4; /* 32 VFs / pools */
679 bitmask = 0x0000000F;
681 default: /* 64 VFs / pools */
683 bitmask = 0x00000003;
686 start_q = vf * num_qs;
688 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
691 reg |= (bitmask << (start_q % 32));
692 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
693 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
697 * ixgbe_mdd_event_X550
698 * @hw: pointer to hardware structure
699 * @vf_bitmap: vf bitmap of malicious vfs
701 * Handle malicious driver detection event.
703 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
706 u32 i, j, reg, q, shift, vf, idx;
708 DEBUGFUNC("ixgbe_mdd_event_X550");
710 /* figure out pool size for mapping to vf's */
711 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
712 switch (reg & IXGBE_MRQC_MRQE_MASK) {
713 case IXGBE_MRQC_VMDQRT8TCEN:
714 shift = 3; /* 16 VFs / pools */
716 case IXGBE_MRQC_VMDQRSS32EN:
717 case IXGBE_MRQC_VMDQRT4TCEN:
718 shift = 2; /* 32 VFs / pools */
721 shift = 1; /* 64 VFs / pools */
725 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
726 for (i = 0; i < 4; i++) {
727 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
728 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
733 /* Get malicious queue */
734 for (j = 0; j < 32 && wqbr; j++) {
736 if (!(wqbr & (1 << j)))
739 /* Get queue from bitmask */
742 /* Map queue to vf */
745 /* Set vf bit in vf_bitmap */
747 vf_bitmap[idx] |= (1 << (vf % 32));
754 * ixgbe_get_media_type_X550em - Get media type
755 * @hw: pointer to hardware structure
757 * Returns the media type (fiber, copper, backplane)
759 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
761 enum ixgbe_media_type media_type;
763 DEBUGFUNC("ixgbe_get_media_type_X550em");
765 /* Detect if there is a copper PHY attached. */
766 switch (hw->device_id) {
767 case IXGBE_DEV_ID_X550EM_X_KR:
768 case IXGBE_DEV_ID_X550EM_X_KX4:
769 media_type = ixgbe_media_type_backplane;
771 case IXGBE_DEV_ID_X550EM_X_SFP:
772 media_type = ixgbe_media_type_fiber;
774 case IXGBE_DEV_ID_X550EM_X_1G_T:
775 case IXGBE_DEV_ID_X550EM_X_10G_T:
776 media_type = ixgbe_media_type_copper;
779 media_type = ixgbe_media_type_unknown;
786 * ixgbe_setup_sfp_modules_X550em - Setup SFP module
787 * @hw: pointer to hardware structure
789 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
792 u16 reg_slice, edc_mode;
795 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
797 switch (hw->phy.sfp_type) {
798 case ixgbe_sfp_type_unknown:
799 return IXGBE_SUCCESS;
800 case ixgbe_sfp_type_not_present:
801 return IXGBE_ERR_SFP_NOT_PRESENT;
802 case ixgbe_sfp_type_da_cu_core0:
803 case ixgbe_sfp_type_da_cu_core1:
806 case ixgbe_sfp_type_srlr_core0:
807 case ixgbe_sfp_type_srlr_core1:
808 case ixgbe_sfp_type_da_act_lmt_core0:
809 case ixgbe_sfp_type_da_act_lmt_core1:
810 case ixgbe_sfp_type_1g_sx_core0:
811 case ixgbe_sfp_type_1g_sx_core1:
812 case ixgbe_sfp_type_1g_lx_core0:
813 case ixgbe_sfp_type_1g_lx_core1:
814 setup_linear = false;
817 return IXGBE_ERR_SFP_NOT_SUPPORTED;
820 ixgbe_init_mac_link_ops_X550em(hw);
821 hw->phy.ops.reset = NULL;
823 /* The CS4227 slice address is the base address + the port-pair reg
824 * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
826 reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
829 edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
831 edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
833 /* Configure CS4227 for connection type. */
834 ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
837 if (ret_val != IXGBE_SUCCESS)
838 ret_val = ixgbe_write_i2c_combined(hw, 0x80, reg_slice,
845 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
846 * @hw: pointer to hardware structure
848 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
850 struct ixgbe_mac_info *mac = &hw->mac;
852 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
854 /* CS4227 does not support autoneg, so disable the laser control
855 * functions for SFP+ fiber
857 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
858 mac->ops.disable_tx_laser = NULL;
859 mac->ops.enable_tx_laser = NULL;
860 mac->ops.flap_tx_laser = NULL;
865 * ixgbe_get_link_capabilities_x550em - Determines link capabilities
866 * @hw: pointer to hardware structure
867 * @speed: pointer to link speed
868 * @autoneg: true when autoneg or autotry is enabled
870 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
871 ixgbe_link_speed *speed,
874 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
877 if (hw->phy.media_type == ixgbe_media_type_fiber) {
879 /* CS4227 SFP must not enable auto-negotiation */
882 /* Check if 1G SFP module. */
883 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
884 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
885 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
886 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
887 *speed = IXGBE_LINK_SPEED_1GB_FULL;
888 return IXGBE_SUCCESS;
891 /* Link capabilities are based on SFP */
892 if (hw->phy.multispeed_fiber)
893 *speed = IXGBE_LINK_SPEED_10GB_FULL |
894 IXGBE_LINK_SPEED_1GB_FULL;
896 *speed = IXGBE_LINK_SPEED_10GB_FULL;
898 *speed = IXGBE_LINK_SPEED_10GB_FULL |
899 IXGBE_LINK_SPEED_1GB_FULL;
903 return IXGBE_SUCCESS;
907 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
908 * @hw: pointer to hardware structure
910 * Initialize any function pointers that were not able to be
911 * set during init_shared_code because the PHY/SFP type was
912 * not known. Perform the SFP init if necessary.
914 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
916 struct ixgbe_phy_info *phy = &hw->phy;
920 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
922 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
923 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
924 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
926 if (hw->bus.lan_id) {
927 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
928 esdp |= IXGBE_ESDP_SDP1_DIR;
930 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
931 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
934 /* Identify the PHY or SFP module */
935 ret_val = phy->ops.identify(hw);
936 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
939 /* Setup function pointers based on detected SFP module and speeds */
940 ixgbe_init_mac_link_ops_X550em(hw);
941 if (phy->sfp_type != ixgbe_sfp_type_unknown)
942 phy->ops.reset = NULL;
944 /* Set functions pointers based on phy type */
945 switch (hw->phy.type) {
946 case ixgbe_phy_x550em_kx4:
947 phy->ops.setup_link = ixgbe_setup_kx4_x550em;
948 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
949 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
951 case ixgbe_phy_x550em_kr:
952 phy->ops.setup_link = ixgbe_setup_kr_x550em;
953 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
954 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
956 case ixgbe_phy_x550em_ext_t:
957 phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
966 * ixgbe_reset_hw_X550em - Perform hardware reset
967 * @hw: pointer to hardware structure
969 * Resets the hardware by resetting the transmit and receive units, masks
970 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
973 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
975 ixgbe_link_speed link_speed;
979 bool link_up = false;
981 DEBUGFUNC("ixgbe_reset_hw_X550em");
983 /* Call adapter stop to disable Tx/Rx and clear interrupts */
984 status = hw->mac.ops.stop_adapter(hw);
985 if (status != IXGBE_SUCCESS)
988 /* flush pending Tx transactions */
989 ixgbe_clear_tx_pending(hw);
991 /* PHY ops must be identified and initialized prior to reset */
993 /* Identify PHY and related function pointers */
994 status = hw->phy.ops.init(hw);
996 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
999 /* start the external PHY */
1000 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
1001 status = ixgbe_init_ext_t_x550em(hw);
1006 /* Setup SFP module if there is one present. */
1007 if (hw->phy.sfp_setup_needed) {
1008 status = hw->mac.ops.setup_sfp(hw);
1009 hw->phy.sfp_setup_needed = false;
1012 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1016 if (!hw->phy.reset_disable && hw->phy.ops.reset)
1017 hw->phy.ops.reset(hw);
1020 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
1021 * If link reset is used when link is up, it might reset the PHY when
1022 * mng is using it. If link is down or the flag to force full link
1023 * reset is set, then perform link reset.
1025 ctrl = IXGBE_CTRL_LNK_RST;
1026 if (!hw->force_full_reset) {
1027 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
1029 ctrl = IXGBE_CTRL_RST;
1032 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1033 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1034 IXGBE_WRITE_FLUSH(hw);
1036 /* Poll for reset bit to self-clear meaning reset is complete */
1037 for (i = 0; i < 10; i++) {
1039 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1040 if (!(ctrl & IXGBE_CTRL_RST_MASK))
1044 if (ctrl & IXGBE_CTRL_RST_MASK) {
1045 status = IXGBE_ERR_RESET_FAILED;
1046 DEBUGOUT("Reset polling failed to complete.\n");
1051 /* Double resets are required for recovery from certain error
1052 * conditions. Between resets, it is necessary to stall to
1053 * allow time for any pending HW events to complete.
1055 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1056 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1060 /* Store the permanent mac address */
1061 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1063 /* Store MAC address from RAR0, clear receive address registers, and
1064 * clear the multicast table. Also reset num_rar_entries to 128,
1065 * since we modify this value when programming the SAN MAC address.
1067 hw->mac.num_rar_entries = 128;
1068 hw->mac.ops.init_rx_addrs(hw);
1075 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
1076 * @hw: pointer to hardware structure
1078 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
1084 /* TODO: The number of attempts and delay between attempts is undefined */
1086 /* decrement retries counter and exit if we hit 0 */
1088 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
1089 "External PHY not yet finished resetting.");
1090 return IXGBE_ERR_PHY;
1096 status = hw->phy.ops.read_reg(hw,
1097 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
1098 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1101 if (status != IXGBE_SUCCESS)
1104 /* Verify PHY FW reset has completed */
1105 } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
1107 /* Set port to low power mode */
1108 status = hw->phy.ops.read_reg(hw,
1109 IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
1110 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1113 if (status != IXGBE_SUCCESS)
1116 reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
1118 status = hw->phy.ops.write_reg(hw,
1119 IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
1120 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1123 if (status != IXGBE_SUCCESS)
1126 /* Enable the transmitter */
1127 status = hw->phy.ops.read_reg(hw,
1128 IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
1129 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1132 if (status != IXGBE_SUCCESS)
1135 reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
1137 status = hw->phy.ops.write_reg(hw,
1138 IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
1139 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1142 if (status != IXGBE_SUCCESS)
1145 /* Un-stall the PHY FW */
1146 status = hw->phy.ops.read_reg(hw,
1147 IXGBE_MDIO_GLOBAL_RES_PR_10,
1148 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1151 if (status != IXGBE_SUCCESS)
1154 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
1156 status = hw->phy.ops.write_reg(hw,
1157 IXGBE_MDIO_GLOBAL_RES_PR_10,
1158 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1165  * ixgbe_setup_kr_x550em - Configure the KR PHY.
1166  * @hw: pointer to hardware structure
1168  * Configures the integrated KR PHY.
1170 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
/* Read the per-port KRM link control register over the IOSF sideband.
 * NOTE(review): "®_val" was mojibake for "&reg_val" (HTML entity &reg;);
 * restored here to match the write-back of reg_val below.
 */
1175 	status = ixgbe_read_iosf_sb_reg_x550(hw,
1176 		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1177 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
/* Enable clause-73 AN with FEC, then rebuild the advertised abilities
 * from hw->phy.autoneg_advertised (KR = 10G, KX = 1G).
 */
1181 	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1182 	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
1183 	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
1184 	reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
1185 		IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
1187 	/* Advertise 10G support. */
1188 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
1189 		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
1191 	/* Advertise 1G support. */
1192 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
1193 		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
1195 	/* Restart auto-negotiation. */
1196 	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1197 	status = ixgbe_write_iosf_sb_reg_x550(hw,
1198 		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1199 		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1205  * ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
1206  * @hw: pointer to hardware structure
1208  * Configures the integrated KX4 PHY.
1210 s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
/* Read the KX4 link control register for this port's PCS instance.
 * NOTE(review): "®_val" was mojibake for "&reg_val" (HTML entity &reg;);
 * restored to match the write-back of reg_val below.
 */
1215 	status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
1216 		IXGBE_SB_IOSF_TARGET_KX4_PCS0 + hw->bus.lan_id, &reg_val);
/* Clear stale ability bits, enable AN, then advertise only what
 * hw->phy.autoneg_advertised requests (KX4 = 10G, KX = 1G).
 */
1220 	reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
1221 		IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
1223 	reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
1225 	/* Advertise 10G support. */
1226 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
1227 		reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
1229 	/* Advertise 1G support. */
1230 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
1231 		reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
1233 	/* Restart auto-negotiation. */
1234 	reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
1235 	status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
1236 		IXGBE_SB_IOSF_TARGET_KX4_PCS0 + hw->bus.lan_id, reg_val);
1242  * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
1243  * @hw: pointer to hardware structure
1244  * @speed: the link speed to force
1246  * Configures the integrated KR PHY to use iXFI mode. Used to connect an
1247  * internal and external PHY at a specific speed, without autonegotiation.
1249 STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
1254 	/* Disable AN and force speed to 10G Serial. */
/* NOTE(review): every "®_val" in this function was mojibake for
 * "&reg_val" (HTML entity &reg;); restored throughout to match the
 * read-modify-write pattern on reg_val.
 */
1255 	status = ixgbe_read_iosf_sb_reg_x550(hw,
1256 		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1257 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1258 	if (status != IXGBE_SUCCESS)
1261 	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1262 	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1264 	/* Select forced link speed for internal PHY. */
1266 	case IXGBE_LINK_SPEED_10GB_FULL:
1267 		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
1269 	case IXGBE_LINK_SPEED_1GB_FULL:
1270 		reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1273 		/* Other link speeds are not supported by internal KR PHY. */
1274 		return IXGBE_ERR_LINK_SETUP;
1277 	status = ixgbe_write_iosf_sb_reg_x550(hw,
1278 		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1279 		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1280 	if (status != IXGBE_SUCCESS)
1283 	/* Disable training protocol FSM. */
1284 	status = ixgbe_read_iosf_sb_reg_x550(hw,
1285 		IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1286 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1287 	if (status != IXGBE_SUCCESS)
1289 	reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
1290 	status = ixgbe_write_iosf_sb_reg_x550(hw,
1291 		IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1292 		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1293 	if (status != IXGBE_SUCCESS)
1296 	/* Disable Flex from training TXFFE. */
1297 	status = ixgbe_read_iosf_sb_reg_x550(hw,
1298 		IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
1299 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1300 	if (status != IXGBE_SUCCESS)
1302 	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
1303 	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
1304 	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
1305 	status = ixgbe_write_iosf_sb_reg_x550(hw,
1306 		IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
1307 		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1308 	if (status != IXGBE_SUCCESS)
/* Same TXFFE coefficient disables for the second DSP state register. */
1310 	status = ixgbe_read_iosf_sb_reg_x550(hw,
1311 		IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
1312 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1313 	if (status != IXGBE_SUCCESS)
1315 	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
1316 	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
1317 	reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
1318 	status = ixgbe_write_iosf_sb_reg_x550(hw,
1319 		IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
1320 		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1321 	if (status != IXGBE_SUCCESS)
1324 	/* Enable override for coefficients. */
1325 	status = ixgbe_read_iosf_sb_reg_x550(hw,
1326 		IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
1327 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1328 	if (status != IXGBE_SUCCESS)
1330 	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
1331 	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
1332 	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
1333 	reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
1334 	status = ixgbe_write_iosf_sb_reg_x550(hw,
1335 		IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
1336 		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1337 	if (status != IXGBE_SUCCESS)
1340 	/* Toggle port SW reset by AN reset. */
1341 	status = ixgbe_read_iosf_sb_reg_x550(hw,
1342 		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1343 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1344 	if (status != IXGBE_SUCCESS)
1346 	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1347 	status = ixgbe_write_iosf_sb_reg_x550(hw,
1348 		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1349 		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1355  * ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
1356  * @hw: point to hardware structure
1358  * Configures the integrated KR PHY to talk to the external PHY. The base
1359  * driver will call this function when it gets notification via interrupt from
1360  * the external PHY. This function forces the internal PHY into iXFI mode at
1361  * the correct speed.
1363  * A return of a non-zero value indicates an error, and the base driver should
1364  * not report link up.
1366 s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
1369 	u16 lasi, autoneg_status, speed;
1370 	ixgbe_link_speed force_speed;
1372 	/* Verify that the external link status has changed */
/* LASI (Link Alarm Status Interrupt) status lives in the PMA/PMD device
 * of the external PHY; reading it also tells us whether the alarm fired.
 */
1373 	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
1374 		IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1376 	if (status != IXGBE_SUCCESS)
1379 	/* If there was no change in link status, we can just exit */
1380 	if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
1381 		return IXGBE_SUCCESS;
1383 	/* we read this twice back to back to indicate current status */
/* The AN status bit is latched-low; the first read clears the latch and
 * the second read reflects the current link state.
 */
1384 	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
1385 		IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
1387 	if (status != IXGBE_SUCCESS)
1390 	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
1391 		IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
1393 	if (status != IXGBE_SUCCESS)
1396 	/* If link is not up return an error indicating treat link as down */
1397 	if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
1398 		return IXGBE_ERR_INVALID_LINK_SETTINGS;
/* Ask the external PHY what speed it negotiated so the internal PHY can
 * be forced to match.
 */
1400 	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
1401 		IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
1404 	/* clear everything but the speed and duplex bits */
1405 	speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
1408 	case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
1409 		force_speed = IXGBE_LINK_SPEED_10GB_FULL;
1411 	case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
1412 		force_speed = IXGBE_LINK_SPEED_1GB_FULL;
1415 		/* Internal PHY does not support anything else */
1416 		return IXGBE_ERR_INVALID_LINK_SETTINGS;
/* Force the internal KR PHY into iXFI mode at the negotiated speed. */
1419 	return ixgbe_setup_ixfi_x550em(hw, &force_speed);
1423  * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
1424  * @hw: pointer to hardware structure
1426  * Configures the integrated KR PHY to use internal loopback mode.
1428 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
1433 	/* Disable AN and force speed to 10G Serial. */
/* NOTE(review): every "®_val" in this function was mojibake for
 * "&reg_val" (HTML entity &reg;); restored throughout to match the
 * read-modify-write pattern on reg_val.
 */
1434 	status = ixgbe_read_iosf_sb_reg_x550(hw,
1435 		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1436 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1437 	if (status != IXGBE_SUCCESS)
1439 	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1440 	reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1441 	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
1442 	status = ixgbe_write_iosf_sb_reg_x550(hw,
1443 		IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1444 		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1445 	if (status != IXGBE_SUCCESS)
1448 	/* Set near-end loopback clocks. */
1449 	status = ixgbe_read_iosf_sb_reg_x550(hw,
1450 		IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
1451 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1452 	if (status != IXGBE_SUCCESS)
1454 	reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
1455 	reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
1456 	status = ixgbe_write_iosf_sb_reg_x550(hw,
1457 		IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
1458 		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1459 	if (status != IXGBE_SUCCESS)
1462 	/* Set loopback enable. */
1463 	status = ixgbe_read_iosf_sb_reg_x550(hw,
1464 		IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
1465 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1466 	if (status != IXGBE_SUCCESS)
1468 	reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
1469 	status = ixgbe_write_iosf_sb_reg_x550(hw,
1470 		IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
1471 		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1472 	if (status != IXGBE_SUCCESS)
1475 	/* Training bypass. */
1476 	status = ixgbe_read_iosf_sb_reg_x550(hw,
1477 		IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1478 		IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1479 	if (status != IXGBE_SUCCESS)
1481 	reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
1482 	status = ixgbe_write_iosf_sb_reg_x550(hw,
1483 		IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1484 		IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1490  * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
1491  * assuming that the semaphore is already obtained.
1492  * @hw: pointer to hardware structure
1493  * @offset: offset of word in the EEPROM to read
1494  * @data: word read from the EEPROM
1496  * Reads a 16 bit word from the EEPROM using the hostif.
1498 s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
1502 	struct ixgbe_hic_read_shadow_ram buffer;
1504 	DEBUGFUNC("ixgbe_read_ee_hostif_data_X550");
/* Build the FW shadow-RAM read command header. */
1505 	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
1506 	buffer.hdr.req.buf_lenh = 0;
1507 	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
1508 	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
1510 	/* convert offset from words to bytes */
1511 	buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
/* one word */
1513 	buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
1515 	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
1517 		IXGBE_HI_COMMAND_TIMEOUT, false);
/* FW places the result in the FLEX_MNG mailbox; extract the 16-bit word. */
1522 	*data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
1523 		FW_NVM_DATA_OFFSET);
1529  * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
1530  * @hw: pointer to hardware structure
1531  * @offset: offset of word in the EEPROM to read
1532  * @data: word read from the EEPROM
1534  * Reads a 16 bit word from the EEPROM using the hostif.
1536 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
1539 	s32 status = IXGBE_SUCCESS;
1541 	DEBUGFUNC("ixgbe_read_ee_hostif_X550");
/* Take the SW/FW EEPROM semaphore around the actual read; fail with
 * IXGBE_ERR_SWFW_SYNC if it cannot be acquired.
 */
1543 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
1545 		status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
1546 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1548 		status = IXGBE_ERR_SWFW_SYNC;
1555  * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
1556  * @hw: pointer to hardware structure
1557  * @offset: offset of word in the EEPROM to read
1558  * @words: number of words
1559  * @data: word(s) read from the EEPROM
1561  * Reads a 16 bit word(s) from the EEPROM using the hostif.
1563 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
1564 				     u16 offset, u16 words, u16 *data)
1566 	struct ixgbe_hic_read_shadow_ram buffer;
1567 	u32 current_word = 0;
1572 	DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
1574 	/* Take semaphore for the entire operation. */
1575 	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1577 		DEBUGOUT("EEPROM read buffer - semaphore failed\n");
/* The FW read buffer is limited, so read in chunks of at most
 * FW_MAX_READ_BUFFER_SIZE bytes (i.e. /2 words) per hostif command.
 */
1581 		if (words > FW_MAX_READ_BUFFER_SIZE / 2)
1582 			words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
1584 			words_to_read = words;
1586 		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
1587 		buffer.hdr.req.buf_lenh = 0;
1588 		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
1589 		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
1591 		/* convert offset from words to bytes */
1592 		buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
1593 		buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
1595 		status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
1597 						      IXGBE_HI_COMMAND_TIMEOUT,
1601 			DEBUGOUT("Host interface command failed\n");
/* Copy the chunk out of the FLEX_MNG mailbox; each 32-bit mailbox
 * register holds two 16-bit EEPROM words (low half first).
 */
1605 		for (i = 0; i < words_to_read; i++) {
1606 			u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
1608 			u32 value = IXGBE_READ_REG(hw, reg);
1610 			data[current_word] = (u16)(value & 0xffff);
1613 			if (i < words_to_read) {
1615 				data[current_word] = (u16)(value & 0xffff);
/* Advance to the next chunk until all requested words are read. */
1619 		words -= words_to_read;
1623 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1628  * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
1629  * @hw: pointer to hardware structure
1630  * @offset: offset of word in the EEPROM to write
1631  * @data: word write to the EEPROM
1633  * Write a 16 bit word to the EEPROM using the hostif.
1635 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
1639 	struct ixgbe_hic_write_shadow_ram buffer;
1641 	DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
/* Build the FW shadow-RAM write command header.
 * NOTE(review): caller is expected to hold the EEPROM semaphore,
 * as done by ixgbe_write_ee_hostif_X550 — confirm at call sites.
 */
1643 	buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
1644 	buffer.hdr.req.buf_lenh = 0;
1645 	buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
1646 	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
/* one word */
1649 	buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
/* convert offset from words to bytes */
1651 	buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
1653 	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
1655 					      IXGBE_HI_COMMAND_TIMEOUT, false);
1661  * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
1662  * @hw: pointer to hardware structure
1663  * @offset: offset of word in the EEPROM to write
1664  * @data: word write to the EEPROM
1666  * Write a 16 bit word to the EEPROM using the hostif.
1668 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
1671 	s32 status = IXGBE_SUCCESS;
1673 	DEBUGFUNC("ixgbe_write_ee_hostif_X550");
/* Take the SW/FW EEPROM semaphore around the actual write; fail with
 * IXGBE_ERR_SWFW_SYNC if it cannot be acquired.
 */
1675 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
1677 		status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
1678 		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1680 		DEBUGOUT("write ee hostif failed to get semaphore");
1681 		status = IXGBE_ERR_SWFW_SYNC;
1688  * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
1689  * @hw: pointer to hardware structure
1690  * @offset: offset of word in the EEPROM to write
1691  * @words: number of words
1692  * @data: word(s) write to the EEPROM
1694  * Write a 16 bit word(s) to the EEPROM using the hostif.
1696 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
1697 				      u16 offset, u16 words, u16 *data)
1699 	s32 status = IXGBE_SUCCESS;
1702 	DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
1704 	/* Take semaphore for the entire operation. */
1705 	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1706 	if (status != IXGBE_SUCCESS) {
1707 		DEBUGOUT("EEPROM write buffer - semaphore failed\n");
/* Write one word per hostif command, stopping at the first failure. */
1711 	for (i = 0; i < words; i++) {
1712 		status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
1715 		if (status != IXGBE_SUCCESS) {
1716 			DEBUGOUT("Eeprom buffered write failed\n");
1721 	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1728  * ixgbe_checksum_ptr_x550 - Checksum one pointer region
1729  * @hw: pointer to hardware structure
1730  * @ptr: pointer offset in eeprom
1731  * @size: size of section pointed by ptr, if 0 first word will be used as size
1732  * @csum: address of checksum to update
1734  * Returns error status for any failure
1736 STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
1737 				   u16 size, u16 *csum, u16 *buffer,
1742 	u16 length, bufsz, i, start;
1745 	bufsz = sizeof(buf) / sizeof(buf[0]);
1747 	/* Read a chunk at the pointer location */
/* When no caller-supplied buffer is given, read the section from the
 * EEPROM in bufsz-word chunks; otherwise checksum directly from buffer.
 */
1749 		status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
1751 			DEBUGOUT("Failed to read EEPROM image\n");
1756 		if (buffer_size < ptr)
1757 			return IXGBE_ERR_PARAM;
1758 		local_buffer = &buffer[ptr];
/* When size is 0, the section's first word holds its length. */
1766 		length = local_buffer[0];
1768 	/* Skip pointer section if length is invalid. */
1769 	if (length == 0xFFFF || length == 0 ||
1770 	    (ptr + length) >= hw->eeprom.word_size)
1771 		return IXGBE_SUCCESS;
1774 	if (buffer && ((u32)start + (u32)length > buffer_size))
1775 		return IXGBE_ERR_PARAM;
/* Accumulate the section into *csum, refilling the chunk buffer when
 * the EEPROM-backed path runs past the end of the current chunk.
 */
1777 	for (i = start; length; i++, length--) {
1778 		if (i == bufsz && !buffer) {
1784 			/* Read a chunk at the pointer location */
1785 			status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
1788 				DEBUGOUT("Failed to read EEPROM image\n");
1792 		*csum += local_buffer[i];
1794 	return IXGBE_SUCCESS;
1798  * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
1799  * @hw: pointer to hardware structure
1800  * @buffer: pointer to buffer containing calculated checksum
1801  * @buffer_size: size of buffer
1803  * Returns a negative error code on error, or the 16-bit checksum
1805 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
1807 	u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
1811 	u16 pointer, i, size;
1813 	DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
1815 	hw->eeprom.ops.init_params(hw);
1818 		/* Read pointer area */
/* With no caller buffer, pull the pointer area from the EEPROM;
 * otherwise checksum the caller-provided image in place.
 */
1819 		status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
1820 						IXGBE_EEPROM_LAST_WORD + 1,
1823 			DEBUGOUT("Failed to read EEPROM image\n");
1826 		local_buffer = eeprom_ptrs;
1828 		if (buffer_size < IXGBE_EEPROM_LAST_WORD)
1829 			return IXGBE_ERR_PARAM;
1830 		local_buffer = buffer;
1834 	 * For X550 hardware include 0x0-0x41 in the checksum, skip the
1835 	 * checksum word itself
1837 	for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
1838 		if (i != IXGBE_EEPROM_CHECKSUM)
1839 			checksum += local_buffer[i];
1842 	 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
1843 	 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
1845 	for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
1846 		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
1849 		pointer = local_buffer[i];
1851 		/* Skip pointer section if the pointer is invalid. */
1852 		if (pointer == 0xFFFF || pointer == 0 ||
1853 		    pointer >= hw->eeprom.word_size)
/* Some sections have a fixed size; for the rest, size 0 tells
 * ixgbe_checksum_ptr_x550 to take the length from the section itself.
 */
1857 		case IXGBE_PCIE_GENERAL_PTR:
1858 			size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
1860 		case IXGBE_PCIE_CONFIG0_PTR:
1861 		case IXGBE_PCIE_CONFIG1_PTR:
1862 			size = IXGBE_PCIE_CONFIG_SIZE;
1869 		status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
1870 						buffer, buffer_size);
/* The stored checksum is the value that makes the total sum equal
 * IXGBE_EEPROM_SUM.
 */
1875 	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1877 	return (s32)checksum;
1881  * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
1882  * @hw: pointer to hardware structure
1884  * Returns a negative error code on error, or the 16-bit checksum
1886 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
/* No caller buffer: ixgbe_calc_checksum_X550 reads the image itself. */
1888 	return ixgbe_calc_checksum_X550(hw, NULL, 0);
1892  * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
1893  * @hw: pointer to hardware structure
1894  * @checksum_val: calculated checksum
1896  * Performs checksum calculation and validates the EEPROM checksum. If the
1897  * caller does not need checksum_val, the value can be NULL.
1899 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
1903 	u16 read_checksum = 0;
1905 	DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
1907 	/* Read the first word from the EEPROM. If this times out or fails, do
1908 	 * not continue or we could be in for a very long wait while every
1911 	status = hw->eeprom.ops.read(hw, 0, &checksum);
1913 		DEBUGOUT("EEPROM read failed\n");
/* calc_checksum returns the 16-bit checksum as a non-negative s32. */
1917 	status = hw->eeprom.ops.calc_checksum(hw);
1921 	checksum = (u16)(status & 0xffff);
/* Fetch the checksum word stored in the EEPROM for comparison. */
1923 	status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
1928 	/* Verify read checksum from EEPROM is the same as
1929 	 * calculated checksum
1931 	if (read_checksum != checksum) {
1932 		status = IXGBE_ERR_EEPROM_CHECKSUM;
1933 		ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
1934 			     "Invalid EEPROM checksum");
1937 	/* If the user cares, return the calculated checksum */
1939 		*checksum_val = checksum;
1945  * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
1946  * @hw: pointer to hardware structure
1948  * After writing EEPROM to shadow RAM using EEWR register, software calculates
1949  * checksum and updates the EEPROM and instructs the hardware to update
1952 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
1957 	DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
1959 	/* Read the first word from the EEPROM. If this times out or fails, do
1960 	 * not continue or we could be in for a very long wait while every
1963 	status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
1965 		DEBUGOUT("EEPROM read failed\n");
/* Recompute the checksum over the current shadow-RAM contents and
 * store it at the checksum word, then push shadow RAM to flash.
 */
1969 	status = ixgbe_calc_eeprom_checksum_X550(hw);
1973 	checksum = (u16)(status & 0xffff);
1975 	status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
1980 		status = ixgbe_update_flash_X550(hw);
1986  * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
1987  * @hw: pointer to hardware structure
1989  * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
1991 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
1993 	s32 status = IXGBE_SUCCESS;
1994 	union ixgbe_hic_hdr2 buffer;
1996 	DEBUGFUNC("ixgbe_update_flash_X550");
/* The dump command has no payload; only the hostif header is sent. */
1998 	buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
1999 	buffer.req.buf_lenh = 0;
2000 	buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
2001 	buffer.req.checksum = FW_DEFAULT_CHECKSUM;
2003 	status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
2005 					      IXGBE_HI_COMMAND_TIMEOUT, false);
2011  * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
2012  * @hw: pointer to hardware structure
2014  * Determines physical layer capabilities of the current configuration.
2016 u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
2018 	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2019 	u16 ext_ability = 0;
2021 	DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
/* Identify the PHY first so hw->phy.type is valid for the switch. */
2023 	hw->phy.ops.identify(hw);
2025 	switch (hw->phy.type) {
2026 	case ixgbe_phy_x550em_kr:
2027 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
2028 				 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2030 	case ixgbe_phy_x550em_kx4:
2031 		physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2032 				 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2034 	case ixgbe_phy_x550em_ext_t:
/* External BASE-T PHY: query its extended-ability MDIO register to
 * learn which copper speeds it supports.
 */
2035 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2036 				     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2038 		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2039 			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2040 		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2041 			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
/* For fiber media, defer to the generic SFP layer detection. */
2047 	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
2048 		physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2050 	return physical_layer;
2054  * ixgbe_get_bus_info_x550em - Set PCI bus info
2055  * @hw: pointer to hardware structure
2057  * Sets bus link width and speed to unknown because X550em is
2060 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
2063 	DEBUGFUNC("ixgbe_get_bus_info_x550em");
/* Bus width/speed are not discoverable here; report them as unknown. */
2065 	hw->bus.width = ixgbe_bus_width_unknown;
2066 	hw->bus.speed = ixgbe_bus_speed_unknown;
2068 	return IXGBE_SUCCESS;
2072 * ixgbe_disable_rx_x550 - Disable RX unit
2074 * Enables the Rx DMA unit for x550
2076 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
2078 u32 rxctrl, pfdtxgswc;
2080 struct ixgbe_hic_disable_rxen fw_cmd;
2082 DEBUGFUNC("ixgbe_enable_rx_dma_x550");
2084 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2085 if (rxctrl & IXGBE_RXCTRL_RXEN) {
2086 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
2087 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
2088 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
2089 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
2090 hw->mac.set_lben = true;
2092 hw->mac.set_lben = false;
2095 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
2096 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
2097 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
2098 fw_cmd.port_number = (u8)hw->bus.lan_id;
2100 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
2101 sizeof(struct ixgbe_hic_disable_rxen),
2102 IXGBE_HI_COMMAND_TIMEOUT, true);
2104 /* If we fail - disable RX using register write */
2106 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2107 if (rxctrl & IXGBE_RXCTRL_RXEN) {
2108 rxctrl &= ~IXGBE_RXCTRL_RXEN;
2109 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);