1 /*******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "ixgbe_x550.h"
35 #include "ixgbe_x540.h"
36 #include "ixgbe_type.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
43 * ixgbe_init_ops_X550 - Inits func ptrs and MAC type
44 * @hw: pointer to hardware structure
46 * Initialize the function pointers and assign the MAC type for X550.
47 * Does not touch the hardware.
49 s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
51 struct ixgbe_mac_info *mac = &hw->mac;
52 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
55 DEBUGFUNC("ixgbe_init_ops_X550");
57 ret_val = ixgbe_init_ops_X540(hw);
58 mac->ops.dmac_config = ixgbe_dmac_config_X550;
59 mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
60 mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
61 mac->ops.setup_eee = ixgbe_setup_eee_X550;
62 mac->ops.set_source_address_pruning =
63 ixgbe_set_source_address_pruning_X550;
64 mac->ops.set_ethertype_anti_spoofing =
65 ixgbe_set_ethertype_anti_spoofing_X550;
67 mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
68 eeprom->ops.init_params = ixgbe_init_eeprom_params_X550;
69 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
70 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
71 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
72 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
73 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
74 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
75 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
77 mac->ops.disable_mdd = ixgbe_disable_mdd_X550;
78 mac->ops.enable_mdd = ixgbe_enable_mdd_X550;
79 mac->ops.mdd_event = ixgbe_mdd_event_X550;
80 mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
81 mac->ops.disable_rx = ixgbe_disable_rx_x550;
86 * ixgbe_read_cs4227 - Read CS4227 register
87 * @hw: pointer to hardware structure
88 * @reg: register number to write
89 * @value: pointer to receive value read
93 STATIC s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
95 return ixgbe_read_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, value);
99 * ixgbe_write_cs4227 - Write CS4227 register
100 * @hw: pointer to hardware structure
101 * @reg: register number to write
102 * @value: value to write to register
104 * Returns status code
106 STATIC s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
108 return ixgbe_write_i2c_combined_unlocked(hw, IXGBE_CS4227, reg, value);
112 * ixgbe_get_cs4227_status - Return CS4227 status
113 * @hw: pointer to hardware structure
115 * Returns error if CS4227 not successfully initialized
117 STATIC s32 ixgbe_get_cs4227_status(struct ixgbe_hw *hw)
123 for (retry = 0; retry < IXGBE_CS4227_RETRIES; ++retry) {
124 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_GLOBAL_ID_LSB,
126 if (status != IXGBE_SUCCESS)
128 if (value == IXGBE_CS4227_GLOBAL_ID_VALUE)
130 msec_delay(IXGBE_CS4227_CHECK_DELAY);
132 if (value != IXGBE_CS4227_GLOBAL_ID_VALUE)
133 return IXGBE_ERR_PHY;
135 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
136 IXGBE_CS4227_SCRATCH_VALUE);
137 if (status != IXGBE_SUCCESS)
139 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
140 if (status != IXGBE_SUCCESS)
142 if (value != IXGBE_CS4227_SCRATCH_VALUE)
143 return IXGBE_ERR_PHY;
144 return IXGBE_SUCCESS;
148 * ixgbe_read_pe - Read register from port expander
149 * @hw: pointer to hardware structure
150 * @reg: register number to read
151 * @value: pointer to receive read value
153 * Returns status code
155 STATIC s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
159 status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
160 if (status != IXGBE_SUCCESS)
161 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
162 "port expander access failed with %d\n", status);
167 * ixgbe_write_pe - Write register to port expander
168 * @hw: pointer to hardware structure
169 * @reg: register number to write
170 * @value: value to write
172 * Returns status code
174 STATIC s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
178 status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value);
179 if (status != IXGBE_SUCCESS)
180 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
181 "port expander access failed with %d\n", status);
186 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
187 * @hw: pointer to hardware structure
191 STATIC s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
196 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®);
197 if (status != IXGBE_SUCCESS)
199 reg |= IXGBE_PE_BIT1;
200 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
201 if (status != IXGBE_SUCCESS)
204 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, ®);
205 if (status != IXGBE_SUCCESS)
207 reg &= ~IXGBE_PE_BIT1;
208 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
209 if (status != IXGBE_SUCCESS)
212 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®);
213 if (status != IXGBE_SUCCESS)
215 reg &= ~IXGBE_PE_BIT1;
216 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
217 if (status != IXGBE_SUCCESS)
220 usec_delay(IXGBE_CS4227_RESET_HOLD);
222 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®);
223 if (status != IXGBE_SUCCESS)
225 reg |= IXGBE_PE_BIT1;
226 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
227 if (status != IXGBE_SUCCESS)
230 msec_delay(IXGBE_CS4227_RESET_DELAY);
232 return IXGBE_SUCCESS;
236 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
237 * @hw: pointer to hardware structure
239 STATIC void ixgbe_check_cs4227(struct ixgbe_hw *hw)
241 u32 swfw_mask = hw->phy.phy_semaphore_mask;
245 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
246 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
247 if (status != IXGBE_SUCCESS) {
248 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
249 "semaphore failed with %d\n", status);
252 status = ixgbe_get_cs4227_status(hw);
253 if (status == IXGBE_SUCCESS) {
254 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
255 msec_delay(hw->eeprom.semaphore_delay);
258 ixgbe_reset_cs4227(hw);
259 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
260 msec_delay(hw->eeprom.semaphore_delay);
262 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
263 "Unable to initialize CS4227, err=%d\n", status);
267 * ixgbe_identify_phy_x550em - Get PHY type based on device id
268 * @hw: pointer to hardware structure
272 STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
274 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
276 switch (hw->device_id) {
277 case IXGBE_DEV_ID_X550EM_X_SFP:
278 /* set up for CS4227 usage */
279 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
280 if (hw->bus.lan_id) {
282 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
283 esdp |= IXGBE_ESDP_SDP1_DIR;
285 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
286 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
288 ixgbe_check_cs4227(hw);
290 return ixgbe_identify_module_generic(hw);
292 case IXGBE_DEV_ID_X550EM_X_KX4:
293 hw->phy.type = ixgbe_phy_x550em_kx4;
295 case IXGBE_DEV_ID_X550EM_X_KR:
296 hw->phy.type = ixgbe_phy_x550em_kr;
298 case IXGBE_DEV_ID_X550EM_X_1G_T:
299 case IXGBE_DEV_ID_X550EM_X_10G_T:
300 return ixgbe_identify_phy_generic(hw);
304 return IXGBE_SUCCESS;
307 STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
308 u32 device_type, u16 *phy_data)
310 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data);
311 return IXGBE_NOT_IMPLEMENTED;
314 STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
315 u32 device_type, u16 phy_data)
317 UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data);
318 return IXGBE_NOT_IMPLEMENTED;
322 * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type
323 * @hw: pointer to hardware structure
325 * Initialize the function pointers and for MAC type X550EM.
326 * Does not touch the hardware.
328 s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
330 struct ixgbe_mac_info *mac = &hw->mac;
331 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
332 struct ixgbe_phy_info *phy = &hw->phy;
335 DEBUGFUNC("ixgbe_init_ops_X550EM");
337 /* Similar to X550 so start there. */
338 ret_val = ixgbe_init_ops_X550(hw);
340 /* Since this function eventually calls
341 * ixgbe_init_ops_540 by design, we are setting
342 * the pointers to NULL explicitly here to overwrite
343 * the values being set in the x540 function.
345 /* Thermal sensor not supported in x550EM */
346 mac->ops.get_thermal_sensor_data = NULL;
347 mac->ops.init_thermal_sensor_thresh = NULL;
348 mac->thermal_sensor_enabled = false;
350 /* FCOE not supported in x550EM */
351 mac->ops.get_san_mac_addr = NULL;
352 mac->ops.set_san_mac_addr = NULL;
353 mac->ops.get_wwn_prefix = NULL;
354 mac->ops.get_fcoe_boot_status = NULL;
356 /* IPsec not supported in x550EM */
357 mac->ops.disable_sec_rx_path = NULL;
358 mac->ops.enable_sec_rx_path = NULL;
360 /* X550EM bus type is internal*/
361 hw->bus.type = ixgbe_bus_type_internal;
362 mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
364 mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
365 mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
366 mac->ops.get_media_type = ixgbe_get_media_type_X550em;
367 mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
368 mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em;
369 mac->ops.reset_hw = ixgbe_reset_hw_X550em;
370 mac->ops.get_supported_physical_layer =
371 ixgbe_get_supported_physical_layer_X550em;
374 phy->ops.init = ixgbe_init_phy_ops_X550em;
375 phy->ops.identify = ixgbe_identify_phy_x550em;
376 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
377 phy->ops.set_phy_power = NULL;
381 eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
382 eeprom->ops.read = ixgbe_read_ee_hostif_X550;
383 eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550;
384 eeprom->ops.write = ixgbe_write_ee_hostif_X550;
385 eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550;
386 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550;
387 eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550;
388 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550;
394 * ixgbe_dmac_config_X550
395 * @hw: pointer to hardware structure
397 * Configure DMA coalescing. If enabling dmac, dmac is activated.
398 * When disabling dmac, dmac enable dmac bit is cleared.
400 s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw)
402 u32 reg, high_pri_tc;
404 DEBUGFUNC("ixgbe_dmac_config_X550");
406 /* Disable DMA coalescing before configuring */
407 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
408 reg &= ~IXGBE_DMACR_DMAC_EN;
409 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
411 /* Disable DMA Coalescing if the watchdog timer is 0 */
412 if (!hw->mac.dmac_config.watchdog_timer)
415 ixgbe_dmac_config_tcs_X550(hw);
417 /* Configure DMA Coalescing Control Register */
418 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
420 /* Set the watchdog timer in units of 40.96 usec */
421 reg &= ~IXGBE_DMACR_DMACWT_MASK;
422 reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096;
424 reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK;
425 /* If fcoe is enabled, set high priority traffic class */
426 if (hw->mac.dmac_config.fcoe_en) {
427 high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc;
428 reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) &
429 IXGBE_DMACR_HIGH_PRI_TC_MASK);
431 reg |= IXGBE_DMACR_EN_MNG_IND;
433 /* Enable DMA coalescing after configuration */
434 reg |= IXGBE_DMACR_DMAC_EN;
435 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
438 return IXGBE_SUCCESS;
442 * ixgbe_dmac_config_tcs_X550
443 * @hw: pointer to hardware structure
445 * Configure DMA coalescing threshold per TC. The dmac enable bit must
446 * be cleared before configuring.
448 s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
450 u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb;
452 DEBUGFUNC("ixgbe_dmac_config_tcs_X550");
454 /* Configure DMA coalescing enabled */
455 switch (hw->mac.dmac_config.link_speed) {
456 case IXGBE_LINK_SPEED_100_FULL:
457 pb_headroom = IXGBE_DMACRXT_100M;
459 case IXGBE_LINK_SPEED_1GB_FULL:
460 pb_headroom = IXGBE_DMACRXT_1G;
463 pb_headroom = IXGBE_DMACRXT_10G;
467 maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >>
468 IXGBE_MHADD_MFS_SHIFT) / 1024);
470 /* Set the per Rx packet buffer receive threshold */
471 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
472 reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc));
473 reg &= ~IXGBE_DMCTH_DMACRXT_MASK;
475 if (tc < hw->mac.dmac_config.num_tcs) {
477 rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc));
478 rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >>
479 IXGBE_RXPBSIZE_SHIFT;
481 /* Calculate receive buffer threshold in kilobytes */
482 if (rx_pb_size > pb_headroom)
483 rx_pb_size = rx_pb_size - pb_headroom;
487 /* Minimum of MFS shall be set for DMCTH */
488 reg |= (rx_pb_size > maxframe_size_kb) ?
489 rx_pb_size : maxframe_size_kb;
491 IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg);
493 return IXGBE_SUCCESS;
497 * ixgbe_dmac_update_tcs_X550
498 * @hw: pointer to hardware structure
500 * Disables dmac, updates per TC settings, and then enables dmac.
502 s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw)
506 DEBUGFUNC("ixgbe_dmac_update_tcs_X550");
508 /* Disable DMA coalescing before configuring */
509 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
510 reg &= ~IXGBE_DMACR_DMAC_EN;
511 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
513 ixgbe_dmac_config_tcs_X550(hw);
515 /* Enable DMA coalescing after configuration */
516 reg = IXGBE_READ_REG(hw, IXGBE_DMACR);
517 reg |= IXGBE_DMACR_DMAC_EN;
518 IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg);
520 return IXGBE_SUCCESS;
524 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
525 * @hw: pointer to hardware structure
527 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
528 * ixgbe_hw struct in order to set up EEPROM access.
530 s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
532 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
536 DEBUGFUNC("ixgbe_init_eeprom_params_X550");
538 if (eeprom->type == ixgbe_eeprom_uninitialized) {
539 eeprom->semaphore_delay = 10;
540 eeprom->type = ixgbe_flash;
542 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
543 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
544 IXGBE_EEC_SIZE_SHIFT);
545 eeprom->word_size = 1 << (eeprom_size +
546 IXGBE_EEPROM_WORD_SIZE_SHIFT);
548 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
549 eeprom->type, eeprom->word_size);
552 return IXGBE_SUCCESS;
556 * ixgbe_setup_eee_X550 - Enable/disable EEE support
557 * @hw: pointer to the HW structure
558 * @enable_eee: boolean flag to enable EEE
560 * Enable/disable EEE based on enable_eee flag.
561 * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
565 s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
572 DEBUGFUNC("ixgbe_setup_eee_X550");
574 eeer = IXGBE_READ_REG(hw, IXGBE_EEER);
575 /* Enable or disable EEE per flag */
577 eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
579 if (hw->device_id == IXGBE_DEV_ID_X550T) {
580 /* Advertise EEE capability */
581 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
582 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
584 autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
585 IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
586 IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
588 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
589 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
590 } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
591 status = ixgbe_read_iosf_sb_reg_x550(hw,
592 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
593 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
594 if (status != IXGBE_SUCCESS)
597 link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
598 IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
600 status = ixgbe_write_iosf_sb_reg_x550(hw,
601 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
602 IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
603 if (status != IXGBE_SUCCESS)
607 eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
609 if (hw->device_id == IXGBE_DEV_ID_X550T) {
610 /* Disable advertised EEE capability */
611 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
612 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg);
614 autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
615 IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
616 IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
618 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
619 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg);
620 } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
621 status = ixgbe_read_iosf_sb_reg_x550(hw,
622 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
623 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
624 if (status != IXGBE_SUCCESS)
627 link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
628 IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
630 status = ixgbe_write_iosf_sb_reg_x550(hw,
631 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
632 IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
633 if (status != IXGBE_SUCCESS)
637 IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
639 return IXGBE_SUCCESS;
643 * ixgbe_set_source_address_pruning_X550 - Enable/Disbale source address pruning
644 * @hw: pointer to hardware structure
645 * @enable: enable or disable source address pruning
646 * @pool: Rx pool to set source address pruning for
648 void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
653 /* max rx pool is 63 */
657 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
658 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
661 pfflp |= (1ULL << pool);
663 pfflp &= ~(1ULL << pool);
665 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
666 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
670 * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing
671 * @hw: pointer to hardware structure
672 * @enable: enable or disable switch for Ethertype anti-spoofing
673 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
676 void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
679 int vf_target_reg = vf >> 3;
680 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
683 DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550");
685 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
687 pfvfspoof |= (1 << vf_target_shift);
689 pfvfspoof &= ~(1 << vf_target_shift);
691 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
695 * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF
697 * @hw: pointer to hardware structure
698 * @reg_addr: 32 bit PHY register to write
699 * @device_type: 3 bit device type
700 * @data: Data to write to the register
702 s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
703 u32 device_type, u32 data)
705 u32 i, command, error;
707 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
708 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
710 /* Write IOSF control register */
711 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
713 /* Write IOSF data register */
714 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
716 * Check every 10 usec to see if the address cycle completed.
717 * The SB IOSF BUSY bit will clear when the operation is
720 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
723 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
724 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
728 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
729 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
730 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
731 ERROR_REPORT2(IXGBE_ERROR_POLLING,
732 "Failed to write, error %x\n", error);
733 return IXGBE_ERR_PHY;
736 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
737 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Write timed out\n");
738 return IXGBE_ERR_PHY;
741 return IXGBE_SUCCESS;
745 * ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF
747 * @hw: pointer to hardware structure
748 * @reg_addr: 32 bit PHY register to write
749 * @device_type: 3 bit device type
750 * @phy_data: Pointer to read data from the register
752 s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
753 u32 device_type, u32 *data)
755 u32 i, command, error;
757 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
758 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
760 /* Write IOSF control register */
761 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
764 * Check every 10 usec to see if the address cycle completed.
765 * The SB IOSF BUSY bit will clear when the operation is
768 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
771 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
772 if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
776 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
777 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
778 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
779 ERROR_REPORT2(IXGBE_ERROR_POLLING,
780 "Failed to read, error %x\n", error);
781 return IXGBE_ERR_PHY;
784 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
785 ERROR_REPORT1(IXGBE_ERROR_POLLING, "Read timed out\n");
786 return IXGBE_ERR_PHY;
789 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
791 return IXGBE_SUCCESS;
795 * ixgbe_disable_mdd_X550
796 * @hw: pointer to hardware structure
798 * Disable malicious driver detection
800 void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw)
804 DEBUGFUNC("ixgbe_disable_mdd_X550");
806 /* Disable MDD for TX DMA and interrupt */
807 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
808 reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
809 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
811 /* Disable MDD for RX and interrupt */
812 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
813 reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
814 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
818 * ixgbe_enable_mdd_X550
819 * @hw: pointer to hardware structure
821 * Enable malicious driver detection
823 void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw)
827 DEBUGFUNC("ixgbe_enable_mdd_X550");
829 /* Enable MDD for TX DMA and interrupt */
830 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
831 reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
832 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
834 /* Enable MDD for RX and interrupt */
835 reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
836 reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
837 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
841 * ixgbe_restore_mdd_vf_X550
842 * @hw: pointer to hardware structure
845 * Restore VF that was disabled during malicious driver detection event
847 void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf)
849 u32 idx, reg, num_qs, start_q, bitmask;
851 DEBUGFUNC("ixgbe_restore_mdd_vf_X550");
853 /* Map VF to queues */
854 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
855 switch (reg & IXGBE_MRQC_MRQE_MASK) {
856 case IXGBE_MRQC_VMDQRT8TCEN:
857 num_qs = 8; /* 16 VFs / pools */
858 bitmask = 0x000000FF;
860 case IXGBE_MRQC_VMDQRSS32EN:
861 case IXGBE_MRQC_VMDQRT4TCEN:
862 num_qs = 4; /* 32 VFs / pools */
863 bitmask = 0x0000000F;
865 default: /* 64 VFs / pools */
867 bitmask = 0x00000003;
870 start_q = vf * num_qs;
872 /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
875 reg |= (bitmask << (start_q % 32));
876 IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg);
877 IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg);
881 * ixgbe_mdd_event_X550
882 * @hw: pointer to hardware structure
883 * @vf_bitmap: vf bitmap of malicious vfs
885 * Handle malicious driver detection event.
887 void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap)
890 u32 i, j, reg, q, shift, vf, idx;
892 DEBUGFUNC("ixgbe_mdd_event_X550");
894 /* figure out pool size for mapping to vf's */
895 reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
896 switch (reg & IXGBE_MRQC_MRQE_MASK) {
897 case IXGBE_MRQC_VMDQRT8TCEN:
898 shift = 3; /* 16 VFs / pools */
900 case IXGBE_MRQC_VMDQRSS32EN:
901 case IXGBE_MRQC_VMDQRT4TCEN:
902 shift = 2; /* 32 VFs / pools */
905 shift = 1; /* 64 VFs / pools */
909 /* Read WQBR_TX and WQBR_RX and check for malicious queues */
910 for (i = 0; i < 4; i++) {
911 wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i));
912 wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
917 /* Get malicious queue */
918 for (j = 0; j < 32 && wqbr; j++) {
920 if (!(wqbr & (1 << j)))
923 /* Get queue from bitmask */
926 /* Map queue to vf */
929 /* Set vf bit in vf_bitmap */
931 vf_bitmap[idx] |= (1 << (vf % 32));
938 * ixgbe_get_media_type_X550em - Get media type
939 * @hw: pointer to hardware structure
941 * Returns the media type (fiber, copper, backplane)
943 enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
945 enum ixgbe_media_type media_type;
947 DEBUGFUNC("ixgbe_get_media_type_X550em");
949 /* Detect if there is a copper PHY attached. */
950 switch (hw->device_id) {
951 case IXGBE_DEV_ID_X550EM_X_KR:
952 case IXGBE_DEV_ID_X550EM_X_KX4:
953 media_type = ixgbe_media_type_backplane;
955 case IXGBE_DEV_ID_X550EM_X_SFP:
956 media_type = ixgbe_media_type_fiber;
958 case IXGBE_DEV_ID_X550EM_X_1G_T:
959 case IXGBE_DEV_ID_X550EM_X_10G_T:
960 media_type = ixgbe_media_type_copper;
963 media_type = ixgbe_media_type_unknown;
970 * ixgbe_setup_sfp_modules_X550em - Setup SFP module
971 * @hw: pointer to hardware structure
973 s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
976 u16 reg_slice, edc_mode;
979 DEBUGFUNC("ixgbe_setup_sfp_modules_X550em");
981 switch (hw->phy.sfp_type) {
982 case ixgbe_sfp_type_unknown:
983 return IXGBE_SUCCESS;
984 case ixgbe_sfp_type_not_present:
985 return IXGBE_ERR_SFP_NOT_PRESENT;
986 case ixgbe_sfp_type_da_cu_core0:
987 case ixgbe_sfp_type_da_cu_core1:
990 case ixgbe_sfp_type_srlr_core0:
991 case ixgbe_sfp_type_srlr_core1:
992 case ixgbe_sfp_type_da_act_lmt_core0:
993 case ixgbe_sfp_type_da_act_lmt_core1:
994 case ixgbe_sfp_type_1g_sx_core0:
995 case ixgbe_sfp_type_1g_sx_core1:
996 case ixgbe_sfp_type_1g_lx_core0:
997 case ixgbe_sfp_type_1g_lx_core1:
998 setup_linear = false;
1001 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1004 ixgbe_init_mac_link_ops_X550em(hw);
1005 hw->phy.ops.reset = NULL;
1007 /* The CS4227 slice address is the base address + the port-pair reg
1008 * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0.
1010 reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
1013 edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
1015 edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
1017 /* Configure CS4227 for connection type. */
1018 ret_val = ixgbe_write_i2c_combined(hw, IXGBE_CS4227, reg_slice,
1021 if (ret_val != IXGBE_SUCCESS)
1022 ret_val = ixgbe_write_i2c_combined(hw, 0x80, reg_slice,
1029 * ixgbe_init_mac_link_ops_X550em - init mac link function pointers
1030 * @hw: pointer to hardware structure
1032 void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
1034 struct ixgbe_mac_info *mac = &hw->mac;
1036 DEBUGFUNC("ixgbe_init_mac_link_ops_X550em");
1038 /* CS4227 does not support autoneg, so disable the laser control
1039 * functions for SFP+ fiber
1041 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
1042 mac->ops.disable_tx_laser = NULL;
1043 mac->ops.enable_tx_laser = NULL;
1044 mac->ops.flap_tx_laser = NULL;
1049 * ixgbe_get_link_capabilities_x550em - Determines link capabilities
1050 * @hw: pointer to hardware structure
1051 * @speed: pointer to link speed
1052 * @autoneg: true when autoneg or autotry is enabled
1054 s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
1055 ixgbe_link_speed *speed,
1058 DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
1061 if (hw->phy.media_type == ixgbe_media_type_fiber) {
1063 /* CS4227 SFP must not enable auto-negotiation */
1066 /* Check if 1G SFP module. */
1067 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1068 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
1069 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
1070 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
1071 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1072 return IXGBE_SUCCESS;
1075 /* Link capabilities are based on SFP */
1076 if (hw->phy.multispeed_fiber)
1077 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1078 IXGBE_LINK_SPEED_1GB_FULL;
1080 *speed = IXGBE_LINK_SPEED_10GB_FULL;
1082 *speed = IXGBE_LINK_SPEED_10GB_FULL |
1083 IXGBE_LINK_SPEED_1GB_FULL;
1087 return IXGBE_SUCCESS;
1091 * ixgbe_init_phy_ops_X550em - PHY/SFP specific init
1092 * @hw: pointer to hardware structure
1094 * Initialize any function pointers that were not able to be
1095 * set during init_shared_code because the PHY/SFP type was
1096 * not known. Perform the SFP init if necessary.
1098 s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
1100 struct ixgbe_phy_info *phy = &hw->phy;
1104 DEBUGFUNC("ixgbe_init_phy_ops_X550em");
1106 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
1107 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
1108 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
1110 if (hw->bus.lan_id) {
1111 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
1112 esdp |= IXGBE_ESDP_SDP1_DIR;
1114 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
1115 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
1118 /* Identify the PHY or SFP module */
1119 ret_val = phy->ops.identify(hw);
1120 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
1123 /* Setup function pointers based on detected SFP module and speeds */
1124 ixgbe_init_mac_link_ops_X550em(hw);
1125 if (phy->sfp_type != ixgbe_sfp_type_unknown)
1126 phy->ops.reset = NULL;
1128 /* Set functions pointers based on phy type */
1129 switch (hw->phy.type) {
1130 case ixgbe_phy_x550em_kx4:
1131 phy->ops.setup_link = ixgbe_setup_kx4_x550em;
1132 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
1133 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
1135 case ixgbe_phy_x550em_kr:
1136 phy->ops.setup_link = ixgbe_setup_kr_x550em;
1137 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
1138 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
1140 case ixgbe_phy_x550em_ext_t:
1141 phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
1150 * ixgbe_reset_hw_X550em - Perform hardware reset
1151 * @hw: pointer to hardware structure
1153 * Resets the hardware by resetting the transmit and receive units, masks
1154 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
1157 s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
1159 struct ixgbe_hic_hdr fw_cmd;
1160 ixgbe_link_speed link_speed;
1164 bool link_up = false;
1166 DEBUGFUNC("ixgbe_reset_hw_X550em");
1168 fw_cmd.cmd = FW_PHY_MGMT_REQ_CMD;
1170 fw_cmd.cmd_or_resp.cmd_resv = 0;
1171 fw_cmd.checksum = FW_DEFAULT_CHECKSUM;
1172 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
1174 IXGBE_HI_PHY_MGMT_REQ_TIMEOUT,
1177 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
1178 "PHY mgmt command failed with %d\n", status);
1179 else if (fw_cmd.cmd_or_resp.ret_status != FW_CEM_RESP_STATUS_SUCCESS)
1180 ERROR_REPORT2(IXGBE_ERROR_CAUTION,
1181 "PHY mgmt command returned %d\n",
1182 fw_cmd.cmd_or_resp.ret_status);
1184 /* Call adapter stop to disable Tx/Rx and clear interrupts */
1185 status = hw->mac.ops.stop_adapter(hw);
1186 if (status != IXGBE_SUCCESS)
1189 /* flush pending Tx transactions */
1190 ixgbe_clear_tx_pending(hw);
1192 /* PHY ops must be identified and initialized prior to reset */
1194 /* Identify PHY and related function pointers */
1195 status = hw->phy.ops.init(hw);
1197 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1200 /* start the external PHY */
1201 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
1202 status = ixgbe_init_ext_t_x550em(hw);
1207 /* Setup SFP module if there is one present. */
1208 if (hw->phy.sfp_setup_needed) {
1209 status = hw->mac.ops.setup_sfp(hw);
1210 hw->phy.sfp_setup_needed = false;
1213 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1217 if (!hw->phy.reset_disable && hw->phy.ops.reset)
1218 hw->phy.ops.reset(hw);
1221 /* Issue global reset to the MAC. Needs to be SW reset if link is up.
1222 * If link reset is used when link is up, it might reset the PHY when
1223 * mng is using it. If link is down or the flag to force full link
1224 * reset is set, then perform link reset.
1226 ctrl = IXGBE_CTRL_LNK_RST;
1227 if (!hw->force_full_reset) {
1228 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
1230 ctrl = IXGBE_CTRL_RST;
1233 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1234 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1235 IXGBE_WRITE_FLUSH(hw);
1237 /* Poll for reset bit to self-clear meaning reset is complete */
1238 for (i = 0; i < 10; i++) {
1240 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1241 if (!(ctrl & IXGBE_CTRL_RST_MASK))
1245 if (ctrl & IXGBE_CTRL_RST_MASK) {
1246 status = IXGBE_ERR_RESET_FAILED;
1247 DEBUGOUT("Reset polling failed to complete.\n");
1252 /* Double resets are required for recovery from certain error
1253 * conditions. Between resets, it is necessary to stall to
1254 * allow time for any pending HW events to complete.
1256 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1257 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1261 /* Store the permanent mac address */
1262 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1264 /* Store MAC address from RAR0, clear receive address registers, and
1265 * clear the multicast table. Also reset num_rar_entries to 128,
1266 * since we modify this value when programming the SAN MAC address.
1268 hw->mac.num_rar_entries = 128;
1269 hw->mac.ops.init_rx_addrs(hw);
1276 * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
1277 * @hw: pointer to hardware structure
1279 s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
1285 /* TODO: The number of attempts and delay between attempts is undefined */
1287 /* decrement retries counter and exit if we hit 0 */
1289 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
1290 "External PHY not yet finished resetting.");
1291 return IXGBE_ERR_PHY;
1297 status = hw->phy.ops.read_reg(hw,
1298 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
1299 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1302 if (status != IXGBE_SUCCESS)
1305 /* Verify PHY FW reset has completed */
1306 } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
1308 /* Set port to low power mode */
1309 status = hw->phy.ops.read_reg(hw,
1310 IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
1311 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1314 if (status != IXGBE_SUCCESS)
1317 reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
1319 status = hw->phy.ops.write_reg(hw,
1320 IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
1321 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1324 if (status != IXGBE_SUCCESS)
1327 /* Enable the transmitter */
1328 status = hw->phy.ops.read_reg(hw,
1329 IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
1330 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1333 if (status != IXGBE_SUCCESS)
1336 reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
1338 status = hw->phy.ops.write_reg(hw,
1339 IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
1340 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1343 if (status != IXGBE_SUCCESS)
1346 /* Un-stall the PHY FW */
1347 status = hw->phy.ops.read_reg(hw,
1348 IXGBE_MDIO_GLOBAL_RES_PR_10,
1349 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1352 if (status != IXGBE_SUCCESS)
1355 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
1357 status = hw->phy.ops.write_reg(hw,
1358 IXGBE_MDIO_GLOBAL_RES_PR_10,
1359 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
1366 * ixgbe_setup_kr_x550em - Configure the KR PHY.
1367 * @hw: pointer to hardware structure
1369 * Configures the integrated KR PHY.
1371 s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
1376 status = ixgbe_read_iosf_sb_reg_x550(hw,
1377 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1378 IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val);
1382 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1383 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
1384 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
1385 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
1386 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
1388 /* Advertise 10G support. */
1389 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
1390 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
1392 /* Advertise 1G support. */
1393 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
1394 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
1396 /* Restart auto-negotiation. */
1397 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1398 status = ixgbe_write_iosf_sb_reg_x550(hw,
1399 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1400 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1406 * ixgbe_setup_kx4_x550em - Configure the KX4 PHY.
1407 * @hw: pointer to hardware structure
1409 * Configures the integrated KX4 PHY.
1411 s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
1416 status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
1417 IXGBE_SB_IOSF_TARGET_KX4_PCS0 + hw->bus.lan_id, ®_val);
1421 reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 |
1422 IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX);
1424 reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE;
1426 /* Advertise 10G support. */
1427 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
1428 reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4;
1430 /* Advertise 1G support. */
1431 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
1432 reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX;
1434 /* Restart auto-negotiation. */
1435 reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
1436 status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
1437 IXGBE_SB_IOSF_TARGET_KX4_PCS0 + hw->bus.lan_id, reg_val);
1443 * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
1444 * @hw: pointer to hardware structure
1445 * @speed: the link speed to force
1447 * Configures the integrated KR PHY to use iXFI mode. Used to connect an
1448 * internal and external PHY at a specific speed, without autonegotiation.
1450 STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
1455 /* Disable AN and force speed to 10G Serial. */
1456 status = ixgbe_read_iosf_sb_reg_x550(hw,
1457 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1458 IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val);
1459 if (status != IXGBE_SUCCESS)
1462 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1463 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1465 /* Select forced link speed for internal PHY. */
1467 case IXGBE_LINK_SPEED_10GB_FULL:
1468 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
1470 case IXGBE_LINK_SPEED_1GB_FULL:
1471 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1474 /* Other link speeds are not supported by internal KR PHY. */
1475 return IXGBE_ERR_LINK_SETUP;
1478 status = ixgbe_write_iosf_sb_reg_x550(hw,
1479 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1480 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1481 if (status != IXGBE_SUCCESS)
1484 /* Disable training protocol FSM. */
1485 status = ixgbe_read_iosf_sb_reg_x550(hw,
1486 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1487 IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val);
1488 if (status != IXGBE_SUCCESS)
1490 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
1491 status = ixgbe_write_iosf_sb_reg_x550(hw,
1492 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1493 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1494 if (status != IXGBE_SUCCESS)
1497 /* Disable Flex from training TXFFE. */
1498 status = ixgbe_read_iosf_sb_reg_x550(hw,
1499 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
1500 IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val);
1501 if (status != IXGBE_SUCCESS)
1503 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
1504 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
1505 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
1506 status = ixgbe_write_iosf_sb_reg_x550(hw,
1507 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
1508 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1509 if (status != IXGBE_SUCCESS)
1511 status = ixgbe_read_iosf_sb_reg_x550(hw,
1512 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
1513 IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val);
1514 if (status != IXGBE_SUCCESS)
1516 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
1517 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
1518 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
1519 status = ixgbe_write_iosf_sb_reg_x550(hw,
1520 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
1521 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1522 if (status != IXGBE_SUCCESS)
1525 /* Enable override for coefficients. */
1526 status = ixgbe_read_iosf_sb_reg_x550(hw,
1527 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
1528 IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val);
1529 if (status != IXGBE_SUCCESS)
1531 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
1532 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
1533 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
1534 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
1535 status = ixgbe_write_iosf_sb_reg_x550(hw,
1536 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
1537 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1538 if (status != IXGBE_SUCCESS)
1541 /* Toggle port SW reset by AN reset. */
1542 status = ixgbe_read_iosf_sb_reg_x550(hw,
1543 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1544 IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val);
1545 if (status != IXGBE_SUCCESS)
1547 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1548 status = ixgbe_write_iosf_sb_reg_x550(hw,
1549 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1550 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1556 * ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
1557 * @hw: pointer to hardware structure
1559 * Configures the integrated KR PHY to talk to the external PHY. The base
1560 * driver will call this function when it gets notification via interrupt from
1561 * the external PHY. This function forces the internal PHY into iXFI mode at
1562 * the correct speed.
1564 * A return of a non-zero value indicates an error, and the base driver should
1565 * not report link up.
1567 s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
1570 u16 lasi, autoneg_status, speed;
1571 ixgbe_link_speed force_speed;
1573 /* Verify that the external link status has changed */
1574 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
1575 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1577 if (status != IXGBE_SUCCESS)
1580 /* If there was no change in link status, we can just exit */
1581 if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
1582 return IXGBE_SUCCESS;
1584 /* we read this twice back to back to indicate current status */
1585 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
1586 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
1588 if (status != IXGBE_SUCCESS)
1591 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
1592 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
1594 if (status != IXGBE_SUCCESS)
1597 /* If link is not up return an error indicating treat link as down */
1598 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
1599 return IXGBE_ERR_INVALID_LINK_SETTINGS;
1601 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
1602 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
1605 /* clear everything but the speed and duplex bits */
1606 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
1609 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
1610 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
1612 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
1613 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
1616 /* Internal PHY does not support anything else */
1617 return IXGBE_ERR_INVALID_LINK_SETTINGS;
1620 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
1624 * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback.
1625 * @hw: pointer to hardware structure
1627 * Configures the integrated KR PHY to use internal loopback mode.
1629 s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
1634 /* Disable AN and force speed to 10G Serial. */
1635 status = ixgbe_read_iosf_sb_reg_x550(hw,
1636 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1637 IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val);
1638 if (status != IXGBE_SUCCESS)
1640 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1641 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1642 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
1643 status = ixgbe_write_iosf_sb_reg_x550(hw,
1644 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1645 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1646 if (status != IXGBE_SUCCESS)
1649 /* Set near-end loopback clocks. */
1650 status = ixgbe_read_iosf_sb_reg_x550(hw,
1651 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
1652 IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val);
1653 if (status != IXGBE_SUCCESS)
1655 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
1656 reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
1657 status = ixgbe_write_iosf_sb_reg_x550(hw,
1658 IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
1659 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1660 if (status != IXGBE_SUCCESS)
1663 /* Set loopback enable. */
1664 status = ixgbe_read_iosf_sb_reg_x550(hw,
1665 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
1666 IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val);
1667 if (status != IXGBE_SUCCESS)
1669 reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
1670 status = ixgbe_write_iosf_sb_reg_x550(hw,
1671 IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
1672 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1673 if (status != IXGBE_SUCCESS)
1676 /* Training bypass. */
1677 status = ixgbe_read_iosf_sb_reg_x550(hw,
1678 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1679 IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val);
1680 if (status != IXGBE_SUCCESS)
1682 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
1683 status = ixgbe_write_iosf_sb_reg_x550(hw,
1684 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1685 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1691 * ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface command
1692 * assuming that the semaphore is already obtained.
1693 * @hw: pointer to hardware structure
1694 * @offset: offset of word in the EEPROM to read
1695 * @data: word read from the EEPROM
1697 * Reads a 16 bit word from the EEPROM using the hostif.
1699 s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
1703 struct ixgbe_hic_read_shadow_ram buffer;
1705 DEBUGFUNC("ixgbe_read_ee_hostif_data_X550");
1706 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
1707 buffer.hdr.req.buf_lenh = 0;
1708 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
1709 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
1711 /* convert offset from words to bytes */
1712 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
1714 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
1716 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
1718 IXGBE_HI_COMMAND_TIMEOUT, false);
1723 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
1724 FW_NVM_DATA_OFFSET);
1730 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
1731 * @hw: pointer to hardware structure
1732 * @offset: offset of word in the EEPROM to read
1733 * @data: word read from the EEPROM
1735 * Reads a 16 bit word from the EEPROM using the hostif.
1737 s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
1740 s32 status = IXGBE_SUCCESS;
1742 DEBUGFUNC("ixgbe_read_ee_hostif_X550");
1744 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
1746 status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
1747 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1749 status = IXGBE_ERR_SWFW_SYNC;
1756 * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
1757 * @hw: pointer to hardware structure
1758 * @offset: offset of word in the EEPROM to read
1759 * @words: number of words
1760 * @data: word(s) read from the EEPROM
1762 * Reads a 16 bit word(s) from the EEPROM using the hostif.
1764 s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
1765 u16 offset, u16 words, u16 *data)
1767 struct ixgbe_hic_read_shadow_ram buffer;
1768 u32 current_word = 0;
1773 DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550");
1775 /* Take semaphore for the entire operation. */
1776 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1778 DEBUGOUT("EEPROM read buffer - semaphore failed\n");
1782 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
1783 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
1785 words_to_read = words;
1787 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
1788 buffer.hdr.req.buf_lenh = 0;
1789 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
1790 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
1792 /* convert offset from words to bytes */
1793 buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2);
1794 buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2);
1796 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
1798 IXGBE_HI_COMMAND_TIMEOUT,
1802 DEBUGOUT("Host interface command failed\n");
1806 for (i = 0; i < words_to_read; i++) {
1807 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
1809 u32 value = IXGBE_READ_REG(hw, reg);
1811 data[current_word] = (u16)(value & 0xffff);
1814 if (i < words_to_read) {
1816 data[current_word] = (u16)(value & 0xffff);
1820 words -= words_to_read;
1824 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1829 * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
1830 * @hw: pointer to hardware structure
1831 * @offset: offset of word in the EEPROM to write
1832 * @data: word write to the EEPROM
1834 * Write a 16 bit word to the EEPROM using the hostif.
1836 s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
1840 struct ixgbe_hic_write_shadow_ram buffer;
1842 DEBUGFUNC("ixgbe_write_ee_hostif_data_X550");
1844 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
1845 buffer.hdr.req.buf_lenh = 0;
1846 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
1847 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
1850 buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16));
1852 buffer.address = IXGBE_CPU_TO_BE32(offset * 2);
1854 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
1856 IXGBE_HI_COMMAND_TIMEOUT, false);
1862 * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
1863 * @hw: pointer to hardware structure
1864 * @offset: offset of word in the EEPROM to write
1865 * @data: word write to the EEPROM
1867 * Write a 16 bit word to the EEPROM using the hostif.
1869 s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
1872 s32 status = IXGBE_SUCCESS;
1874 DEBUGFUNC("ixgbe_write_ee_hostif_X550");
1876 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
1878 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
1879 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1881 DEBUGOUT("write ee hostif failed to get semaphore");
1882 status = IXGBE_ERR_SWFW_SYNC;
1889 * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
1890 * @hw: pointer to hardware structure
1891 * @offset: offset of word in the EEPROM to write
1892 * @words: number of words
1893 * @data: word(s) write to the EEPROM
1895 * Write a 16 bit word(s) to the EEPROM using the hostif.
1897 s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
1898 u16 offset, u16 words, u16 *data)
1900 s32 status = IXGBE_SUCCESS;
1903 DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550");
1905 /* Take semaphore for the entire operation. */
1906 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1907 if (status != IXGBE_SUCCESS) {
1908 DEBUGOUT("EEPROM write buffer - semaphore failed\n");
1912 for (i = 0; i < words; i++) {
1913 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
1916 if (status != IXGBE_SUCCESS) {
1917 DEBUGOUT("Eeprom buffered write failed\n");
1922 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1929 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
1930 * @hw: pointer to hardware structure
1931 * @ptr: pointer offset in eeprom
1932 * @size: size of section pointed by ptr, if 0 first word will be used as size
1933 * @csum: address of checksum to update
1935 * Returns error status for any failure
1937 STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
1938 u16 size, u16 *csum, u16 *buffer,
1943 u16 length, bufsz, i, start;
1946 bufsz = sizeof(buf) / sizeof(buf[0]);
1948 /* Read a chunk at the pointer location */
1950 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
1952 DEBUGOUT("Failed to read EEPROM image\n");
1957 if (buffer_size < ptr)
1958 return IXGBE_ERR_PARAM;
1959 local_buffer = &buffer[ptr];
1967 length = local_buffer[0];
1969 /* Skip pointer section if length is invalid. */
1970 if (length == 0xFFFF || length == 0 ||
1971 (ptr + length) >= hw->eeprom.word_size)
1972 return IXGBE_SUCCESS;
1975 if (buffer && ((u32)start + (u32)length > buffer_size))
1976 return IXGBE_ERR_PARAM;
1978 for (i = start; length; i++, length--) {
1979 if (i == bufsz && !buffer) {
1985 /* Read a chunk at the pointer location */
1986 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
1989 DEBUGOUT("Failed to read EEPROM image\n");
1993 *csum += local_buffer[i];
1995 return IXGBE_SUCCESS;
1999 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
2000 * @hw: pointer to hardware structure
2001 * @buffer: pointer to buffer containing calculated checksum
2002 * @buffer_size: size of buffer
2004 * Returns a negative error code on error, or the 16-bit checksum
2006 s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size)
2008 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
2012 u16 pointer, i, size;
2014 DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550");
2016 hw->eeprom.ops.init_params(hw);
2019 /* Read pointer area */
2020 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
2021 IXGBE_EEPROM_LAST_WORD + 1,
2024 DEBUGOUT("Failed to read EEPROM image\n");
2027 local_buffer = eeprom_ptrs;
2029 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
2030 return IXGBE_ERR_PARAM;
2031 local_buffer = buffer;
2035 * For X550 hardware include 0x0-0x41 in the checksum, skip the
2036 * checksum word itself
2038 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
2039 if (i != IXGBE_EEPROM_CHECKSUM)
2040 checksum += local_buffer[i];
2043 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
2044 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
2046 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
2047 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
2050 pointer = local_buffer[i];
2052 /* Skip pointer section if the pointer is invalid. */
2053 if (pointer == 0xFFFF || pointer == 0 ||
2054 pointer >= hw->eeprom.word_size)
2058 case IXGBE_PCIE_GENERAL_PTR:
2059 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
2061 case IXGBE_PCIE_CONFIG0_PTR:
2062 case IXGBE_PCIE_CONFIG1_PTR:
2063 size = IXGBE_PCIE_CONFIG_SIZE;
2070 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
2071 buffer, buffer_size);
2076 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2078 return (s32)checksum;
2082 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
2083 * @hw: pointer to hardware structure
2085 * Returns a negative error code on error, or the 16-bit checksum
2087 s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
2089 return ixgbe_calc_checksum_X550(hw, NULL, 0);
2093 * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
2094 * @hw: pointer to hardware structure
2095 * @checksum_val: calculated checksum
2097 * Performs checksum calculation and validates the EEPROM checksum. If the
2098 * caller does not need checksum_val, the value can be NULL.
2100 s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val)
2104 u16 read_checksum = 0;
2106 DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550");
2108 /* Read the first word from the EEPROM. If this times out or fails, do
2109 * not continue or we could be in for a very long wait while every
2112 status = hw->eeprom.ops.read(hw, 0, &checksum);
2114 DEBUGOUT("EEPROM read failed\n");
2118 status = hw->eeprom.ops.calc_checksum(hw);
2122 checksum = (u16)(status & 0xffff);
2124 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
2129 /* Verify read checksum from EEPROM is the same as
2130 * calculated checksum
2132 if (read_checksum != checksum) {
2133 status = IXGBE_ERR_EEPROM_CHECKSUM;
2134 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
2135 "Invalid EEPROM checksum");
2138 /* If the user cares, return the calculated checksum */
2140 *checksum_val = checksum;
2146 * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
2147 * @hw: pointer to hardware structure
2149 * After writing EEPROM to shadow RAM using EEWR register, software calculates
2150 * checksum and updates the EEPROM and instructs the hardware to update
2153 s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
2158 DEBUGFUNC("ixgbe_update_eeprom_checksum_X550");
2160 /* Read the first word from the EEPROM. If this times out or fails, do
2161 * not continue or we could be in for a very long wait while every
2164 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
2166 DEBUGOUT("EEPROM read failed\n");
2170 status = ixgbe_calc_eeprom_checksum_X550(hw);
2174 checksum = (u16)(status & 0xffff);
2176 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
2181 status = ixgbe_update_flash_X550(hw);
2187 * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
2188 * @hw: pointer to hardware structure
2190 * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
2192 s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
2194 s32 status = IXGBE_SUCCESS;
2195 union ixgbe_hic_hdr2 buffer;
2197 DEBUGFUNC("ixgbe_update_flash_X550");
2199 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
2200 buffer.req.buf_lenh = 0;
2201 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
2202 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
2204 status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
2206 IXGBE_HI_COMMAND_TIMEOUT, false);
2212 * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type
2213 * @hw: pointer to hardware structure
2215 * Determines physical layer capabilities of the current configuration.
2217 u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
2219 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2220 u16 ext_ability = 0;
2222 DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
2224 hw->phy.ops.identify(hw);
2226 switch (hw->phy.type) {
2227 case ixgbe_phy_x550em_kr:
2228 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
2229 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2231 case ixgbe_phy_x550em_kx4:
2232 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2233 IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2235 case ixgbe_phy_x550em_ext_t:
2236 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2237 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
2239 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2240 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2241 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2242 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2248 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
2249 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2251 return physical_layer;
2255 * ixgbe_get_bus_info_X550em - Set PCI bus info
2256 * @hw: pointer to hardware structure
2258 * Sets bus link width and speed to unknown because X550em is
2261 s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
2264 DEBUGFUNC("ixgbe_get_bus_info_x550em");
2266 hw->bus.width = ixgbe_bus_width_unknown;
2267 hw->bus.speed = ixgbe_bus_speed_unknown;
2269 return IXGBE_SUCCESS;
2273 * ixgbe_disable_rx_x550 - Disable RX unit
2275 * Disables the Rx unit for x550
2277 void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
2279 u32 rxctrl, pfdtxgswc;
2281 struct ixgbe_hic_disable_rxen fw_cmd;
2283 DEBUGFUNC("ixgbe_enable_rx_dma_x550");
2285 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2286 if (rxctrl & IXGBE_RXCTRL_RXEN) {
2287 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
2288 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
2289 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
2290 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
2291 hw->mac.set_lben = true;
2293 hw->mac.set_lben = false;
2296 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
2297 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
2298 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
2299 fw_cmd.port_number = (u8)hw->bus.lan_id;
2301 status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
2302 sizeof(struct ixgbe_hic_disable_rxen),
2303 IXGBE_HI_COMMAND_TIMEOUT, true);
2305 /* If we fail - disable RX using register write */
2307 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2308 if (rxctrl & IXGBE_RXCTRL_RXEN) {
2309 rxctrl &= ~IXGBE_RXCTRL_RXEN;
2310 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);