1 /*******************************************************************************
3 Copyright (c) 2001-2014, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "ixgbe_type.h"
35 #include "ixgbe_82599.h"
36 #include "ixgbe_api.h"
37 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h"
39 #ident "$Id: ixgbe_82599.c,v 1.334 2013/12/04 22:34:00 jtkirshe Exp $"
/* 82599 device limits: queue counts, receive-address (RAR) entries,
 * multicast/VLAN filter table sizes, and Rx packet-buffer size (KB).
 * Consumed below in ixgbe_init_ops_82599() to populate mac capabilities.
 */
41 #define IXGBE_82599_MAX_TX_QUEUES 128
42 #define IXGBE_82599_MAX_RX_QUEUES 128
43 #define IXGBE_82599_RAR_ENTRIES 128
44 #define IXGBE_82599_MC_TBL_SIZE 128
45 #define IXGBE_82599_VFT_TBL_SIZE 128
46 #define IXGBE_82599_RX_PB_SIZE 512
/* Forward declarations for file-local (STATIC) helpers defined later in
 * this translation unit; installed as ops callbacks in ixgbe_init_ops_82599()
 * and ixgbe_init_phy_ops_82599().
 */
48 STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
49 ixgbe_link_speed speed,
50 bool autoneg_wait_to_complete);
51 STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
52 STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
53 u16 offset, u16 *data);
54 STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
55 u16 words, u16 *data);
56 STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
57 u8 dev_addr, u8 *data);
58 STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
59 u8 dev_addr, u8 data);
/*
 * ixgbe_init_mac_link_ops_82599 - Init MAC link-related function pointers
 * @hw: pointer to hardware structure
 *
 * Installs laser-control handlers for unmanaged SFP+ fiber parts (NULL
 * otherwise) and picks the setup_link implementation: multispeed fiber,
 * SmartSpeed (backplane without LESM firmware), or the plain 82599 path.
 * NOTE(review): this extract has gaps in its original line numbering;
 * braces and else branches between the clauses below are not visible here.
 */
61 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
63 struct ixgbe_mac_info *mac = &hw->mac;
65 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
68 * enable the laser control functions for SFP+ fiber
/* Laser control only applies when media is fiber and manageability FW
 * is not driving the link. */
71 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
72 !ixgbe_mng_enabled(hw)) {
73 mac->ops.disable_tx_laser =
74 &ixgbe_disable_tx_laser_multispeed_fiber;
75 mac->ops.enable_tx_laser =
76 &ixgbe_enable_tx_laser_multispeed_fiber;
77 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
/* (else branch in original) no laser control available */
80 mac->ops.disable_tx_laser = NULL;
81 mac->ops.enable_tx_laser = NULL;
82 mac->ops.flap_tx_laser = NULL;
85 if (hw->phy.multispeed_fiber) {
86 /* Set up dual speed SFP+ support */
87 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
/* SmartSpeed only when on backplane, SmartSpeed is auto/on, and LESM
 * firmware is not managing the link. */
89 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
90 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
91 hw->phy.smart_speed == ixgbe_smart_speed_on) &&
92 !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
93 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
95 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
/* Kernel-doc header for ixgbe_init_phy_ops_82599 (opening marker not
 * visible in this extract). Returns s32 status; IXGBE_ERR_SFP_NOT_SUPPORTED
 * propagates out via the goto below. */
101 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
102 * @hw: pointer to hardware structure
104 * Initialize any function pointers that were not able to be
105 * set during init_shared_code because the PHY/SFP type was
106 * not known. Perform the SFP init if necessary.
109 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
111 struct ixgbe_mac_info *mac = &hw->mac;
112 struct ixgbe_phy_info *phy = &hw->phy;
113 s32 ret_val = IXGBE_SUCCESS;
116 DEBUGFUNC("ixgbe_init_phy_ops_82599");
/* QSFP parts share the I2C bus with FW; configure SDP pins so the
 * driver side can access the module EEPROM. */
118 if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
119 /* Store flag indicating I2C bus access control unit. */
120 hw->phy.qsfp_shared_i2c_bus = TRUE;
122 /* Initialize access to QSFP+ I2C bus */
123 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
124 esdp |= IXGBE_ESDP_SDP0_DIR;
125 esdp &= ~IXGBE_ESDP_SDP1_DIR;
126 esdp &= ~IXGBE_ESDP_SDP0;
127 esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
128 esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
129 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
130 IXGBE_WRITE_FLUSH(hw);
/* Use the 82599-specific I2C byte accessors for the shared bus. */
132 phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
133 phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
135 /* Identify the PHY or SFP module */
136 ret_val = phy->ops.identify(hw);
137 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
138 goto init_phy_ops_out;
140 /* Setup function pointers based on detected SFP module and speeds */
141 ixgbe_init_mac_link_ops_82599(hw);
142 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
143 hw->phy.ops.reset = NULL;
145 /* If copper media, overwrite with copper function pointers */
146 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
147 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
148 mac->ops.get_link_capabilities =
149 &ixgbe_get_copper_link_capabilities_generic;
152 /* Set necessary function pointers based on PHY type */
153 switch (hw->phy.type) {
/* NOTE(review): case label (presumably the TNX PHY type) and the
 * remainder of this function are missing from this extract. */
155 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
156 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
157 phy->ops.get_firmware_version =
158 &ixgbe_get_phy_firmware_version_tnx;
/*
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * Plays the SFP init sequence from the EEPROM into IXGBE_CORECTL under the
 * MAC/CSR SW/FW semaphore, then restarts the DSP in SFI mode via
 * prot_autoc_write. Error tail (eeprom-read failure) releases the semaphore
 * and returns IXGBE_ERR_PHY.
 * NOTE(review): gaps in the original line numbering — loop/branch closing
 * braces, `goto` targets and some statements are not visible in this extract.
 */
167 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
169 s32 ret_val = IXGBE_SUCCESS;
170 u16 list_offset, data_offset, data_value;
172 DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
174 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
175 ixgbe_init_mac_link_ops_82599(hw);
177 hw->phy.ops.reset = NULL;
179 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
181 if (ret_val != IXGBE_SUCCESS)
184 /* PHY config will finish before releasing the semaphore */
185 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
186 IXGBE_GSSR_MAC_CSR_SM);
187 if (ret_val != IXGBE_SUCCESS) {
188 ret_val = IXGBE_ERR_SWFW_SYNC;
/* Stream EEPROM words to CORECTL until the 0xffff terminator. */
192 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
194 while (data_value != 0xffff) {
195 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
196 IXGBE_WRITE_FLUSH(hw);
197 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
201 /* Release the semaphore */
202 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
203 /* Delay obtaining semaphore again to allow FW access
204 * prot_autoc_write uses the semaphore too.
206 msec_delay(hw->eeprom.semaphore_delay);
208 /* Restart DSP and set SFI mode */
209 ret_val = hw->mac.ops.prot_autoc_write(hw,
210 hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
/* (failure branch in original) */
214 DEBUGOUT("sfp module setup not complete\n");
215 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
/* Error tail: reached when an eeprom read above fails. */
225 /* Release the semaphore */
226 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
227 /* Delay obtaining semaphore again to allow FW access */
228 msec_delay(hw->eeprom.semaphore_delay);
229 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
230 "eeprom read at offset %d failed", data_offset);
231 return IXGBE_ERR_PHY;
/* Returns IXGBE_SUCCESS, or IXGBE_ERR_SWFW_SYNC if the semaphore could not
 * be acquired. NOTE(review): the statement setting *locked = true after a
 * successful acquire is not visible in this extract (line-number gap). */
235 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
236 * @hw: pointer to hardware structure
237 * @locked: Return true if we locked for this read.
238 * @reg_val: Value we read from AUTOC
240 * For this part (82599) we need to wrap read-modify-writes with a possible
241 * FW/SW lock. It is assumed this lock will be freed with the next
242 * prot_autoc_write_82599().
244 s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
249 /* If LESM is on then we need to hold the SW/FW semaphore. */
250 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
251 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
252 IXGBE_GSSR_MAC_CSR_SM);
253 if (ret_val != IXGBE_SUCCESS)
254 return IXGBE_ERR_SWFW_SYNC;
259 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
260 return IXGBE_SUCCESS;
/* Writes AUTOC under the SW/FW semaphore when LESM firmware is active,
 * then performs a pipeline reset. Bails early (without writing) when MNG FW
 * blocks the reset. Param doc below corrected to match the actual parameter
 * name `autoc` (was `@reg_val`). */
264 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
265 * @hw: pointer to hardware structure
266 * @autoc: value to write to AUTOC
267 * @locked: bool to indicate whether the SW/FW lock was already taken by
268 * previous prot_autoc_read_82599.
270 * This part (82599) may need to hold the SW/FW lock around all writes to
271 * AUTOC. Likewise after a write we need to do a pipeline reset.
273 s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
275 s32 ret_val = IXGBE_SUCCESS;
277 /* Blocked by MNG FW so bail */
278 if (ixgbe_check_reset_blocked(hw))
281 /* We only need to get the lock if:
282 * - We didn't do it already (in the read part of a read-modify-write)
285 if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
286 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
287 IXGBE_GSSR_MAC_CSR_SM);
288 if (ret_val != IXGBE_SUCCESS)
289 return IXGBE_ERR_SWFW_SYNC;
294 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
295 ret_val = ixgbe_reset_pipeline_82599(hw);
298 /* Free the SW/FW semaphore as we either grabbed it here or
299 * already had it when this function was called.
302 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Populates hw->mac/phy/eeprom ops tables and capability fields for 82599.
 * Starts from the generic table (ixgbe_init_ops_generic) and overrides the
 * 82599-specific entries. Pure software init — no register writes except
 * the FWSM read used to derive arc_subsystem_valid. */
308 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
309 * @hw: pointer to hardware structure
311 * Initialize the function pointers and assign the MAC type for 82599.
312 * Does not touch the hardware.
315 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
317 struct ixgbe_mac_info *mac = &hw->mac;
318 struct ixgbe_phy_info *phy = &hw->phy;
319 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
322 DEBUGFUNC("ixgbe_init_ops_82599");
324 ixgbe_init_phy_ops_generic(hw);
325 ret_val = ixgbe_init_ops_generic(hw);
/* PHY ops */
328 phy->ops.identify = &ixgbe_identify_phy_82599;
329 phy->ops.init = &ixgbe_init_phy_ops_82599;
/* MAC ops */
332 mac->ops.reset_hw = &ixgbe_reset_hw_82599;
333 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
334 mac->ops.get_media_type = &ixgbe_get_media_type_82599;
335 mac->ops.get_supported_physical_layer =
336 &ixgbe_get_supported_physical_layer_82599;
337 mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
338 mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
339 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
340 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
341 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
342 mac->ops.start_hw = &ixgbe_start_hw_82599;
343 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
344 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
345 mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
346 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
347 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
348 mac->ops.prot_autoc_read = &prot_autoc_read_82599;
349 mac->ops.prot_autoc_write = &prot_autoc_write_82599;
351 /* RAR, Multicast, VLAN */
352 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
353 mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
354 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
355 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
356 mac->rar_highwater = 1;
357 mac->ops.set_vfta = &ixgbe_set_vfta_generic;
358 mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
359 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
360 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
361 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
362 mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
363 mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
/* Link ops */
366 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
367 mac->ops.check_link = &ixgbe_check_mac_link_generic;
368 mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
369 ixgbe_init_mac_link_ops_82599(hw);
/* Capability limits from the IXGBE_82599_* defines above. */
371 mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
372 mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
373 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
374 mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
375 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
376 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
377 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/* ARC subsystem is valid only when FWSM reports a FW mode. */
379 mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
380 IXGBE_FWSM_MODE_MASK) ? true : false;
382 hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
/* EEPROM ops: 82599-specific reads (see forward declarations). */
385 eeprom->ops.read = &ixgbe_read_eeprom_82599;
386 eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
388 /* Manageability interface */
389 mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
392 mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
/* Computes the supported speed mask from the SFP type (1G-only modules) or
 * from AUTOC's link-mode field, preferring the EEPROM-default AUTOC snapshot
 * when stored. NOTE(review): the statements setting *autoneg and the switch
 * `break`s are in line-number gaps of this extract. */
398 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
399 * @hw: pointer to hardware structure
400 * @speed: pointer to link speed
401 * @autoneg: true when autoneg or autotry is enabled
403 * Determines the link capabilities by reading the AUTOC register.
405 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
406 ixgbe_link_speed *speed,
409 s32 status = IXGBE_SUCCESS;
412 DEBUGFUNC("ixgbe_get_link_capabilities_82599");
415 /* Check if 1G SFP module. */
416 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
417 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
418 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
419 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
420 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
421 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
422 *speed = IXGBE_LINK_SPEED_1GB_FULL;
428 * Determine link capabilities based on the stored value of AUTOC,
429 * which represents EEPROM defaults. If AUTOC value has not
430 * been stored, use the current register values.
432 if (hw->mac.orig_link_settings_stored)
433 autoc = hw->mac.orig_autoc;
435 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
437 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
438 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
439 *speed = IXGBE_LINK_SPEED_1GB_FULL;
443 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
444 *speed = IXGBE_LINK_SPEED_10GB_FULL;
448 case IXGBE_AUTOC_LMS_1G_AN:
449 *speed = IXGBE_LINK_SPEED_1GB_FULL;
453 case IXGBE_AUTOC_LMS_10G_SERIAL:
454 *speed = IXGBE_LINK_SPEED_10GB_FULL;
/* KX4/KX/KR modes: accumulate speeds from the support bits. */
458 case IXGBE_AUTOC_LMS_KX4_KX_KR:
459 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
460 *speed = IXGBE_LINK_SPEED_UNKNOWN;
461 if (autoc & IXGBE_AUTOC_KR_SUPP)
462 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
463 if (autoc & IXGBE_AUTOC_KX4_SUPP)
464 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
465 if (autoc & IXGBE_AUTOC_KX_SUPP)
466 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
470 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
471 *speed = IXGBE_LINK_SPEED_100_FULL;
472 if (autoc & IXGBE_AUTOC_KR_SUPP)
473 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
474 if (autoc & IXGBE_AUTOC_KX4_SUPP)
475 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
476 if (autoc & IXGBE_AUTOC_KX_SUPP)
477 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
481 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
482 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
/* (default case in original) unrecognized link mode */
487 status = IXGBE_ERR_LINK_SETUP;
/* Multispeed fiber supports both 10G and 1G regardless of mode. */
492 if (hw->phy.multispeed_fiber) {
493 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
494 IXGBE_LINK_SPEED_1GB_FULL;
496 /* QSFP must not enable full auto-negotiation
497 * Limited autoneg is enabled at 1G
499 if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
/* Maps hw->phy.type (copper PHYs) or hw->device_id to an ixgbe_media_type.
 * NOTE(review): the switch `break` statements and return fall in
 * line-number gaps of this extract. */
510 * ixgbe_get_media_type_82599 - Get media type
511 * @hw: pointer to hardware structure
513 * Returns the media type (fiber, copper, backplane)
515 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
517 enum ixgbe_media_type media_type;
519 DEBUGFUNC("ixgbe_get_media_type_82599");
521 /* Detect if there is a copper PHY attached. */
522 switch (hw->phy.type) {
523 case ixgbe_phy_cu_unknown:
525 media_type = ixgbe_media_type_copper;
/* Otherwise decide from the PCI device ID. */
531 switch (hw->device_id) {
532 case IXGBE_DEV_ID_82599_KX4:
533 case IXGBE_DEV_ID_82599_KX4_MEZZ:
534 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
535 case IXGBE_DEV_ID_82599_KR:
536 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
537 case IXGBE_DEV_ID_82599_XAUI_LOM:
538 /* Default device ID is mezzanine card KX/KX4 */
539 media_type = ixgbe_media_type_backplane;
541 case IXGBE_DEV_ID_82599_SFP:
542 case IXGBE_DEV_ID_82599_SFP_FCOE:
543 case IXGBE_DEV_ID_82599_SFP_EM:
544 case IXGBE_DEV_ID_82599_SFP_SF2:
545 case IXGBE_DEV_ID_82599_SFP_SF_QP:
546 case IXGBE_DEV_ID_82599EN_SFP:
547 media_type = ixgbe_media_type_fiber;
549 case IXGBE_DEV_ID_82599_CX4:
550 media_type = ixgbe_media_type_cx4;
552 case IXGBE_DEV_ID_82599_T3_LOM:
553 media_type = ixgbe_media_type_copper;
555 case IXGBE_DEV_ID_82599_LS:
556 media_type = ixgbe_media_type_fiber_lco;
558 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
559 media_type = ixgbe_media_type_fiber_qsfp;
/* (default case in original) */
562 media_type = ixgbe_media_type_unknown;
/* Sets the AUTOC2 link-disable-on-D3 bits when manageability FW is not in
 * pass-through mode and the EEPROM CCD (clock-control-disable) bit is set.
 * NOTE(review): the declaration of ee_ctrl_2 falls in a line-number gap. */
570 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
571 * @hw: pointer to hardware structure
573 * Disables link during D3 power down sequence.
576 void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
578 u32 autoc2_reg, fwsm;
581 DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
582 ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
584 /* Check to see if MNG FW could be enabled */
585 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
587 if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
589 ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
590 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
591 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
592 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
/* Resets the link pipeline (under the MAC/CSR semaphore when LESM firmware
 * is active) and, for KX4/KX/KR modes, optionally polls LINKS for
 * autoneg completion up to IXGBE_AUTO_NEG_TIME iterations. */
597 * ixgbe_start_mac_link_82599 - Setup MAC link settings
598 * @hw: pointer to hardware structure
599 * @autoneg_wait_to_complete: true when waiting for completion is needed
601 * Configures link settings based on values in the ixgbe_hw struct.
602 * Restarts the link. Performs autonegotiation if needed.
604 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
605 bool autoneg_wait_to_complete)
610 s32 status = IXGBE_SUCCESS;
611 bool got_lock = false;
613 DEBUGFUNC("ixgbe_start_mac_link_82599");
616 /* reset_pipeline requires us to hold this lock as it writes to
619 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
620 status = hw->mac.ops.acquire_swfw_sync(hw,
621 IXGBE_GSSR_MAC_CSR_SM);
622 if (status != IXGBE_SUCCESS)
629 ixgbe_reset_pipeline_82599(hw);
/* Released only when acquired above (got_lock path in original). */
632 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
634 /* Only poll for autoneg to complete if specified to do so */
635 if (autoneg_wait_to_complete) {
636 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
637 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
638 IXGBE_AUTOC_LMS_KX4_KX_KR ||
639 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
640 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
641 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
642 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
643 links_reg = 0; /* Just in case Autoneg time = 0 */
644 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
645 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
646 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
/* Timed out without KX_AN_COMP => report autoneg failure. */
650 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
651 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
652 DEBUGOUT("Autoneg did not complete.\n");
657 /* Add delay to filter out noises during initial link setup */
/* Drives ESDP SDP3 high to shut the Tx laser off; skipped entirely when
 * manageability FW blocks link resets. */
665 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
666 * @hw: pointer to hardware structure
668 * The base drivers may require better control over SFP+ module
669 * PHY states. This includes selectively shutting down the Tx
670 * laser on the PHY, effectively halting physical link.
672 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
674 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
676 /* Blocked by MNG FW so bail */
677 if (ixgbe_check_reset_blocked(hw))
680 /* Disable Tx laser; allow 100us to go dark per spec */
681 esdp_reg |= IXGBE_ESDP_SDP3;
682 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
683 IXGBE_WRITE_FLUSH(hw);
/* Drives ESDP SDP3 low to turn the Tx laser back on (inverse of the
 * disable routine above). */
688 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
689 * @hw: pointer to hardware structure
691 * The base drivers may require better control over SFP+ module
692 * PHY states. This includes selectively turning on the Tx
693 * laser on the PHY, effectively starting physical link.
695 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
697 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
699 /* Enable Tx laser; allow 100ms to light up */
700 esdp_reg &= ~IXGBE_ESDP_SDP3;
701 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
702 IXGBE_WRITE_FLUSH(hw);
/* Toggles the Tx laser off/on once when autotry_restart is set, then clears
 * the flag; a no-op when MNG FW blocks resets. */
707 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
708 * @hw: pointer to hardware structure
710 * When the driver changes the link speeds that it can support,
711 * it sets autotry_restart to true to indicate that we need to
712 * initiate a new autotry session with the link partner. To do
713 * so, we set the speed then disable and re-enable the Tx laser, to
714 * alert the link partner that it also needs to restart autotry on its
715 * end. This is consistent with true clause 37 autoneg, which also
716 * involves a loss of signal.
718 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
720 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
722 /* Blocked by MNG FW so bail */
723 if (ixgbe_check_reset_blocked(hw))
726 if (hw->mac.autotry_restart) {
727 ixgbe_disable_tx_laser_multispeed_fiber(hw);
728 ixgbe_enable_tx_laser_multispeed_fiber(hw);
729 hw->mac.autotry_restart = false;
/* Software speed negotiation for multispeed fiber: tries 10G then 1G,
 * setting the module rate-select pin (SDP5) per attempt, flapping the Tx
 * laser, and polling for link. Falls back to re-invoking itself with only
 * the highest attempted speed if nothing linked.
 * NOTE(review): line-number gaps hide loop bodies, `goto out` targets,
 * msec_delay calls and the final return in this extract. */
735 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
736 * @hw: pointer to hardware structure
737 * @speed: new link speed
738 * @autoneg_wait_to_complete: true when waiting for completion is needed
740 * Set the link speed in the AUTOC register and restarts link.
742 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
743 ixgbe_link_speed speed,
744 bool autoneg_wait_to_complete)
746 s32 status = IXGBE_SUCCESS;
747 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
748 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
750 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
752 bool autoneg, link_up = false;
754 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
756 /* Mask off requested but non-supported speeds */
757 status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
758 if (status != IXGBE_SUCCESS)
764 * Try each speed one by one, highest priority first. We do this in
765 * software because 10gb fiber doesn't support speed autonegotiation.
767 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
769 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
771 /* If we already have link at this speed, just jump out */
772 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
773 if (status != IXGBE_SUCCESS)
776 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
779 /* Set the module link speed */
780 switch (hw->phy.media_type) {
781 case ixgbe_media_type_fiber:
/* SDP5 high selects the module's 10G rate. */
782 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
783 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
784 IXGBE_WRITE_FLUSH(hw);
786 case ixgbe_media_type_fiber_qsfp:
787 /* QSFP module automatically detects MAC link speed */
790 DEBUGOUT("Unexpected media type.\n");
794 /* Allow module to change analog characteristics (1G->10G) */
797 status = ixgbe_setup_mac_link_82599(hw,
798 IXGBE_LINK_SPEED_10GB_FULL,
799 autoneg_wait_to_complete);
800 if (status != IXGBE_SUCCESS)
803 /* Flap the tx laser if it has not already been done */
804 ixgbe_flap_tx_laser(hw);
807 * Wait for the controller to acquire link. Per IEEE 802.3ap,
808 * Section 73.10.2, we may have to wait up to 500ms if KR is
809 * attempted. 82599 uses the same timing for 10g SFI.
811 for (i = 0; i < 5; i++) {
812 /* Wait for the link partner to also set speed */
815 /* If we have link, just jump out */
816 status = ixgbe_check_link(hw, &link_speed,
818 if (status != IXGBE_SUCCESS)
/* 1G attempt mirrors the 10G attempt above. */
826 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
828 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
829 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
831 /* If we already have link at this speed, just jump out */
832 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
833 if (status != IXGBE_SUCCESS)
836 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
839 /* Set the module link speed */
840 switch (hw->phy.media_type) {
841 case ixgbe_media_type_fiber:
/* SDP5 low selects the module's 1G rate. */
842 esdp_reg &= ~IXGBE_ESDP_SDP5;
843 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
844 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
845 IXGBE_WRITE_FLUSH(hw);
847 case ixgbe_media_type_fiber_qsfp:
848 /* QSFP module automatically detects link speed */
851 DEBUGOUT("Unexpected media type.\n");
855 /* Allow module to change analog characteristics (10G->1G) */
858 status = ixgbe_setup_mac_link_82599(hw,
859 IXGBE_LINK_SPEED_1GB_FULL,
860 autoneg_wait_to_complete);
861 if (status != IXGBE_SUCCESS)
864 /* Flap the Tx laser if it has not already been done */
865 ixgbe_flap_tx_laser(hw);
867 /* Wait for the link partner to also set speed */
870 /* If we have link, just jump out */
871 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
872 if (status != IXGBE_SUCCESS)
880 * We didn't get link. Configure back to the highest speed we tried,
881 * (if there was more than one). We call ourselves back with just the
882 * single highest speed that the user requested.
885 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
886 highest_link_speed, autoneg_wait_to_complete);
/* Exit path: record what we advertised for later reference. */
889 /* Set autoneg_advertised value based on input link speed */
890 hw->phy.autoneg_advertised = 0;
892 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
893 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
895 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
896 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
/* SmartSpeed: first attempts link with the full advertisement (retried up
 * to IXGBE_SMARTSPEED_MAX_RETRIES), then — if KR plus KX/KX4 were both
 * advertised — disables KR (smart_speed_active) and retries, finally
 * restoring full advertisement if still no link.
 * NOTE(review): msec_delay calls, loop closing braces and the return are in
 * line-number gaps of this extract. */
902 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
903 * @hw: pointer to hardware structure
904 * @speed: new link speed
905 * @autoneg_wait_to_complete: true when waiting for completion is needed
907 * Implements the Intel SmartSpeed algorithm.
909 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
910 ixgbe_link_speed speed,
911 bool autoneg_wait_to_complete)
913 s32 status = IXGBE_SUCCESS;
914 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
916 bool link_up = false;
917 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
919 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
921 /* Set autoneg_advertised value based on input link speed */
922 hw->phy.autoneg_advertised = 0;
924 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
925 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
927 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
928 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
930 if (speed & IXGBE_LINK_SPEED_100_FULL)
931 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
934 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
935 * autoneg advertisement if link is unable to be established at the
936 * highest negotiated rate. This can sometimes happen due to integrity
937 * issues with the physical media connection.
940 /* First, try to get link with full advertisement */
941 hw->phy.smart_speed_active = false;
942 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
943 status = ixgbe_setup_mac_link_82599(hw, speed,
944 autoneg_wait_to_complete);
945 if (status != IXGBE_SUCCESS)
949 * Wait for the controller to acquire link. Per IEEE 802.3ap,
950 * Section 73.10.2, we may have to wait up to 500ms if KR is
951 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
952 * Table 9 in the AN MAS.
954 for (i = 0; i < 5; i++) {
957 /* If we have link, just jump out */
958 status = ixgbe_check_link(hw, &link_speed, &link_up,
960 if (status != IXGBE_SUCCESS)
969 * We didn't get link. If we advertised KR plus one of KX4/KX
970 * (or BX4/BX), then disable KR and try again.
972 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
973 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
976 /* Turn SmartSpeed on to disable KR support */
977 hw->phy.smart_speed_active = true;
978 status = ixgbe_setup_mac_link_82599(hw, speed,
979 autoneg_wait_to_complete);
980 if (status != IXGBE_SUCCESS)
984 * Wait for the controller to acquire link. 600ms will allow for
985 * the AN link_fail_inhibit_timer as well for multiple cycles of
986 * parallel detect, both 10g and 1g. This allows for the maximum
987 * connect attempts as defined in the AN MAS table 73-7.
989 for (i = 0; i < 6; i++) {
992 /* If we have link, just jump out */
993 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
994 if (status != IXGBE_SUCCESS)
1001 /* We didn't get link. Turn SmartSpeed back off. */
1002 hw->phy.smart_speed_active = false;
1003 status = ixgbe_setup_mac_link_82599(hw, speed,
1004 autoneg_wait_to_complete);
1007 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
1008 DEBUGOUT("Smartspeed has downgraded the link speed "
1009 "from the maximum advertised\n");
/* Core 82599 link setup: masks the requested speed against capabilities,
 * rewrites the AUTOC link-mode/support bits (KX4/KX/KR or 1G<->10G SFI
 * switching), commits via prot_autoc_write when changed, and optionally
 * waits for KX autoneg completion.
 * NOTE(review): line-number gaps hide the `goto out` targets, some closing
 * braces and the links_reg assignment before line 1110. */
1014 * ixgbe_setup_mac_link_82599 - Set MAC link speed
1015 * @hw: pointer to hardware structure
1016 * @speed: new link speed
1017 * @autoneg_wait_to_complete: true when waiting for completion is needed
1019 * Set the link speed in the AUTOC register and restarts link.
1021 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1022 ixgbe_link_speed speed,
1023 bool autoneg_wait_to_complete)
1025 bool autoneg = false;
1026 s32 status = IXGBE_SUCCESS;
1027 u32 pma_pmd_1g, link_mode;
1028 u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
1029 u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
1030 u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
1031 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1032 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
1035 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
1037 DEBUGFUNC("ixgbe_setup_mac_link_82599");
1039 /* Check to see if speed passed in is supported. */
1040 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
1044 speed &= link_capabilities;
1046 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
1047 status = IXGBE_ERR_LINK_SETUP;
1051 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
1052 if (hw->mac.orig_link_settings_stored)
1053 orig_autoc = hw->mac.orig_autoc;
1057 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
1058 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1060 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
1061 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
1062 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
1063 /* Set KX4/KX/KR support according to speed requested */
1064 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
1065 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
1066 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
1067 autoc |= IXGBE_AUTOC_KX4_SUPP;
/* KR is suppressed while SmartSpeed has downgraded the link. */
1068 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
1069 (hw->phy.smart_speed_active == false))
1070 autoc |= IXGBE_AUTOC_KR_SUPP;
1072 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
1073 autoc |= IXGBE_AUTOC_KX_SUPP;
1074 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
1075 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
1076 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
1077 /* Switch from 1G SFI to 10G SFI if requested */
1078 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
1079 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
1080 autoc &= ~IXGBE_AUTOC_LMS_MASK;
1081 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
1083 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
1084 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
1085 /* Switch from 10G SFI to 1G SFI if requested */
1086 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1087 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
1088 autoc &= ~IXGBE_AUTOC_LMS_MASK;
1089 if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
1090 autoc |= IXGBE_AUTOC_LMS_1G_AN;
1092 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
/* Only touch hardware when the computed AUTOC actually differs. */
1096 if (autoc != current_autoc) {
1098 status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
1099 if (status != IXGBE_SUCCESS)
1102 /* Only poll for autoneg to complete if specified to do so */
1103 if (autoneg_wait_to_complete) {
1104 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
1105 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
1106 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
1107 links_reg = 0; /*Just in case Autoneg time=0*/
1108 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
1110 IXGBE_READ_REG(hw, IXGBE_LINKS);
1111 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
1115 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
1117 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
1118 DEBUGOUT("Autoneg did not complete.\n");
1123 /* Add delay to filter out noises during initial link setup */
1132 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
1133 * @hw: pointer to hardware structure
1134 * @speed: new link speed
1135 * @autoneg_wait_to_complete: true if waiting is needed to complete
1137 * Restarts link on PHY and MAC based on settings passed in.
1139 STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1140 ixgbe_link_speed speed,
1141 bool autoneg_wait_to_complete)
1145 DEBUGFUNC("ixgbe_setup_copper_link_82599");
1147 /* Setup the PHY according to input speed */
1148 status = hw->phy.ops.setup_link_speed(hw, speed,
1149 autoneg_wait_to_complete);
1151 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
1157 * ixgbe_reset_hw_82599 - Perform hardware reset
1158 * @hw: pointer to hardware structure
1160 * Resets the hardware by resetting the transmit and receive units, masks
1161 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
1164 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1166 ixgbe_link_speed link_speed;
1169 u32 i, autoc, autoc2;
1171 bool link_up = false;
1173 DEBUGFUNC("ixgbe_reset_hw_82599");
1175 /* Call adapter stop to disable tx/rx and clear interrupts */
1176 status = hw->mac.ops.stop_adapter(hw);
1177 if (status != IXGBE_SUCCESS)
1180 /* flush pending Tx transactions */
1181 ixgbe_clear_tx_pending(hw);
1183 /* PHY ops must be identified and initialized prior to reset */
1185 /* Identify PHY and related function pointers */
1186 status = hw->phy.ops.init(hw);
1188 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1191 /* Setup SFP module if there is one present. */
1192 if (hw->phy.sfp_setup_needed) {
1193 status = hw->mac.ops.setup_sfp(hw);
1194 hw->phy.sfp_setup_needed = false;
1197 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1201 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
1202 hw->phy.ops.reset(hw);
1204 /* remember AUTOC from before we reset */
1205 curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
1209 * Issue global reset to the MAC. Needs to be SW reset if link is up.
1210 * If link reset is used when link is up, it might reset the PHY when
1211 * mng is using it. If link is down or the flag to force full link
1212 * reset is set, then perform link reset.
1214 ctrl = IXGBE_CTRL_LNK_RST;
1215 if (!hw->force_full_reset) {
1216 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
1218 ctrl = IXGBE_CTRL_RST;
1221 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1222 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1223 IXGBE_WRITE_FLUSH(hw);
1225 /* Poll for reset bit to self-clear meaning reset is complete */
1226 for (i = 0; i < 10; i++) {
1228 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1229 if (!(ctrl & IXGBE_CTRL_RST_MASK))
1233 if (ctrl & IXGBE_CTRL_RST_MASK) {
1234 status = IXGBE_ERR_RESET_FAILED;
1235 DEBUGOUT("Reset polling failed to complete.\n");
1241 * Double resets are required for recovery from certain error
1242 * conditions. Between resets, it is necessary to stall to
1243 * allow time for any pending HW events to complete.
1245 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1246 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1251 * Store the original AUTOC/AUTOC2 values if they have not been
1252 * stored off yet. Otherwise restore the stored original
1253 * values since the reset operation sets back to defaults.
1255 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1256 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1258 /* Enable link if disabled in NVM */
1259 if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
1260 autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
1261 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1262 IXGBE_WRITE_FLUSH(hw);
1265 if (hw->mac.orig_link_settings_stored == false) {
1266 hw->mac.orig_autoc = autoc;
1267 hw->mac.orig_autoc2 = autoc2;
1268 hw->mac.orig_link_settings_stored = true;
1271 /* If MNG FW is running on a multi-speed device that
1272 * doesn't autoneg with out driver support we need to
1273 * leave LMS in the state it was before we MAC reset.
1274 * Likewise if we support WoL we don't want change the
1277 if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
1279 hw->mac.orig_autoc =
1280 (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
1283 if (autoc != hw->mac.orig_autoc) {
1284 status = hw->mac.ops.prot_autoc_write(hw,
1287 if (status != IXGBE_SUCCESS)
1291 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1292 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1293 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1294 autoc2 |= (hw->mac.orig_autoc2 &
1295 IXGBE_AUTOC2_UPPER_MASK);
1296 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1300 /* Store the permanent mac address */
1301 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1304 * Store MAC address from RAR0, clear receive address registers, and
1305 * clear the multicast table. Also reset num_rar_entries to 128,
1306 * since we modify this value when programming the SAN MAC address.
1308 hw->mac.num_rar_entries = 128;
1309 hw->mac.ops.init_rx_addrs(hw);
1311 /* Store the permanent SAN mac address */
1312 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1314 /* Add the SAN MAC address to the RAR only if it's a valid address */
1315 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1316 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1317 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1319 /* Save the SAN MAC RAR index */
1320 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1322 /* Reserve the last RAR for the SAN MAC address */
1323 hw->mac.num_rar_entries--;
1326 /* Store the alternative WWNN/WWPN prefix */
1327 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1328 &hw->mac.wwpn_prefix);
1335 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
1336 * @hw: pointer to hardware structure
1338 STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw)
1342 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1343 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1344 IXGBE_FDIRCMD_CMD_MASK))
1345 return IXGBE_SUCCESS;
1349 return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
1353 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1354 * @hw: pointer to hardware structure
1356 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1360 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1361 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1363 DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1366 * Before starting reinitialization process,
1367 * FDIRCMD.CMD must be zero.
1369 err = ixgbe_fdir_check_cmd_complete(hw);
1371 DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
1375 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1376 IXGBE_WRITE_FLUSH(hw);
1378 * 82599 adapters flow director init flow cannot be restarted,
1379 * Workaround 82599 silicon errata by performing the following steps
1380 * before re-writing the FDIRCTRL control register with the same value.
1381 * - write 1 to bit 8 of FDIRCMD register &
1382 * - write 0 to bit 8 of FDIRCMD register
1384 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1385 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1386 IXGBE_FDIRCMD_CLEARHT));
1387 IXGBE_WRITE_FLUSH(hw);
1388 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1389 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1390 ~IXGBE_FDIRCMD_CLEARHT));
1391 IXGBE_WRITE_FLUSH(hw);
1393 * Clear FDIR Hash register to clear any leftover hashes
1394 * waiting to be programmed.
1396 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1397 IXGBE_WRITE_FLUSH(hw);
1399 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1400 IXGBE_WRITE_FLUSH(hw);
1402 /* Poll init-done after we write FDIRCTRL register */
1403 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1404 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1405 IXGBE_FDIRCTRL_INIT_DONE)
1409 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1410 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1411 return IXGBE_ERR_FDIR_REINIT_FAILED;
1414 /* Clear FDIR statistics registers (read to clear) */
1415 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1416 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1417 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1418 IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1419 IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1421 return IXGBE_SUCCESS;
1425 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1426 * @hw: pointer to hardware structure
1427 * @fdirctrl: value to write to flow director control register
1429 STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1433 DEBUGFUNC("ixgbe_fdir_enable_82599");
1435 /* Prime the keys for hashing */
1436 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1437 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1440 * Poll init-done after we write the register. Estimated times:
1441 * 10G: PBALLOC = 11b, timing is 60us
1442 * 1G: PBALLOC = 11b, timing is 600us
1443 * 100M: PBALLOC = 11b, timing is 6ms
1445 * Multiple these timings by 4 if under full Rx load
1447 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1448 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1449 * this might not finish in our poll time, but we can live with that
1452 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1453 IXGBE_WRITE_FLUSH(hw);
1454 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1455 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1456 IXGBE_FDIRCTRL_INIT_DONE)
1461 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1462 DEBUGOUT("Flow Director poll time exceeded!\n");
1466 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1467 * @hw: pointer to hardware structure
1468 * @fdirctrl: value to write to flow director control register, initially
1469 * contains just the value of the Rx packet buffer allocation
1471 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1473 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1476 * Continue setup of fdirctrl register bits:
1477 * Move the flexible bytes to use the ethertype - shift 6 words
1478 * Set the maximum length per hash bucket to 0xA filters
1479 * Send interrupt when 64 filters are left
1481 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1482 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1483 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1485 /* write hashes and fdirctrl register, poll for completion */
1486 ixgbe_fdir_enable_82599(hw, fdirctrl);
1488 return IXGBE_SUCCESS;
1492 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1493 * @hw: pointer to hardware structure
1494 * @fdirctrl: value to write to flow director control register, initially
1495 * contains just the value of the Rx packet buffer allocation
1496 * @cloud_mode: true - cloud mode, false - other mode
1498 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
1501 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1504 * Continue setup of fdirctrl register bits:
1505 * Turn perfect match filtering on
1506 * Report hash in RSS field of Rx wb descriptor
1507 * Initialize the drop queue
1508 * Move the flexible bytes to use the ethertype - shift 6 words
1509 * Set the maximum length per hash bucket to 0xA filters
1510 * Send interrupt when 64 (0x4 * 16) filters are left
1512 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1513 IXGBE_FDIRCTRL_REPORT_STATUS |
1514 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1515 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1516 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1517 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1520 /* write hashes and fdirctrl register, poll for completion */
1521 ixgbe_fdir_enable_82599(hw, fdirctrl);
1523 return IXGBE_SUCCESS;
/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
1551 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1552 * @stream: input bitstream to compute the hash on
1554 * This function is almost identical to the function above but contains
1555 * several optimizations such as unwinding all of the loops, letting the
1556 * compiler work out all of the conditional ifs since the keys are static
1557 * defines, and computing two keys at once since the hashed dword stream
1558 * will be the same for both keys.
1560 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1561 union ixgbe_atr_hash_dword common)
1563 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1564 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1566 /* record the flow_vm_vlan bits as they are a key part to the hash */
1567 flow_vm_vlan = IXGBE_NTOHL(input.dword);
1569 /* generate common hash dword */
1570 hi_hash_dword = IXGBE_NTOHL(common.dword);
1572 /* low dword is word swapped version of common */
1573 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1575 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1576 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1578 /* Process bits 0 and 16 */
1579 IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1582 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1583 * delay this because bit 0 of the stream should not be processed
1584 * so we do not add the VLAN until after bit 0 was processed
1586 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1588 /* Process remaining 30 bit of the key */
1589 IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1590 IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1591 IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1592 IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1593 IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1594 IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1595 IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1596 IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1597 IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1598 IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1599 IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1600 IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1601 IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1602 IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1603 IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1605 /* combine common_hash result with signature and bucket hashes */
1606 bucket_hash ^= common_hash;
1607 bucket_hash &= IXGBE_ATR_HASH_MASK;
1609 sig_hash ^= common_hash << 16;
1610 sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1612 /* return completed signature hash */
1613 return sig_hash ^ bucket_hash;
1617 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1618 * @hw: pointer to hardware structure
1619 * @input: unique input dword
1620 * @common: compressed common input dword
1621 * @queue: queue index to direct traffic to
1623 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1624 union ixgbe_atr_hash_dword input,
1625 union ixgbe_atr_hash_dword common,
1632 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1635 * Get the flow_type in order to program FDIRCMD properly
1636 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1637 * fifth is FDIRCMD.TUNNEL_FILTER
1639 switch (input.formatted.flow_type) {
1640 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1641 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1642 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1643 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1644 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1645 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1648 DEBUGOUT(" Error on flow type input\n");
1649 return IXGBE_ERR_CONFIG;
1652 /* configure FDIRCMD register */
1653 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1654 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1655 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1656 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1659 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1660 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1662 fdirhashcmd = (u64)fdircmd << 32;
1663 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1664 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1666 err = ixgbe_fdir_check_cmd_complete(hw);
1668 DEBUGOUT("Flow Director command did not complete!\n");
1672 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1674 return IXGBE_SUCCESS;
/* Applies key bit _n (and _n + 16) of the bucket hash key to the hash words */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
1687 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1688 * @atr_input: input bitstream to compute the hash on
1689 * @input_mask: mask for the input bitstream
1691 * This function serves two main purposes. First it applies the input_mask
1692 * to the atr_input resulting in a cleaned up atr_input data stream.
1693 * Secondly it computes the hash and stores it in the bkt_hash field at
1694 * the end of the input byte stream. This way it will be available for
1695 * future use without needing to recompute the hash.
1697 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1698 union ixgbe_atr_input *input_mask)
1701 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1702 u32 bucket_hash = 0;
1706 /* Apply masks to input data */
1707 for (i = 0; i < 14; i++)
1708 input->dword_stream[i] &= input_mask->dword_stream[i];
1710 /* record the flow_vm_vlan bits as they are a key part to the hash */
1711 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1713 /* generate common hash dword */
1714 for (i = 1; i <= 13; i++)
1715 hi_dword ^= input->dword_stream[i];
1716 hi_hash_dword = IXGBE_NTOHL(hi_dword);
1718 /* low dword is word swapped version of common */
1719 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1721 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1722 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1724 /* Process bits 0 and 16 */
1725 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1728 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1729 * delay this because bit 0 of the stream should not be processed
1730 * so we do not add the VLAN until after bit 0 was processed
1732 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1734 /* Process remaining 30 bit of the key */
1735 for (i = 1; i <= 15; i++)
1736 IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
1739 * Limit hash to 13 bits since max bucket count is 8K.
1740 * Store result at the end of the input stream.
1742 input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1746 * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
1747 * @input_mask: mask to be bit swapped
1749 * The source and destination port masks for flow director are bit swapped
1750 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
1751 * generate a correctly swapped value we need to bit swap the mask and that
1752 * is what is accomplished by this function.
1754 STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1756 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1757 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1758 mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1759 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1760 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1761 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1762 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1782 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1783 union ixgbe_atr_input *input_mask, bool cloud_mode)
1785 /* mask IPv6 since it is currently not supported */
1786 u32 fdirm = IXGBE_FDIRM_DIPv6;
1788 DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1791 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1792 * are zero, then assume a full mask for that field. Also assume that
1793 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1794 * cannot be masked out in this implementation.
1796 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1800 /* verify bucket hash is cleared on hash generation */
1801 if (input_mask->formatted.bkt_hash)
1802 DEBUGOUT(" bucket hash should always be 0 in mask\n");
1804 /* Program FDIRM and verify partial masks */
1805 switch (input_mask->formatted.vm_pool & 0x7F) {
1807 fdirm |= IXGBE_FDIRM_POOL;
1811 DEBUGOUT(" Error on vm pool mask\n");
1812 return IXGBE_ERR_CONFIG;
1815 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1817 fdirm |= IXGBE_FDIRM_L4P;
1818 if (input_mask->formatted.dst_port ||
1819 input_mask->formatted.src_port) {
1820 DEBUGOUT(" Error on src/dst port mask\n");
1821 return IXGBE_ERR_CONFIG;
1823 case IXGBE_ATR_L4TYPE_MASK:
1826 DEBUGOUT(" Error on flow type mask\n");
1827 return IXGBE_ERR_CONFIG;
1830 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1832 /* mask VLAN ID, fall through to mask VLAN priority */
1833 fdirm |= IXGBE_FDIRM_VLANID;
1835 /* mask VLAN priority */
1836 fdirm |= IXGBE_FDIRM_VLANP;
1839 /* mask VLAN ID only, fall through */
1840 fdirm |= IXGBE_FDIRM_VLANID;
1842 /* no VLAN fields masked */
1845 DEBUGOUT(" Error on VLAN mask\n");
1846 return IXGBE_ERR_CONFIG;
1849 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1851 /* Mask Flex Bytes, fall through */
1852 fdirm |= IXGBE_FDIRM_FLEX;
1856 DEBUGOUT(" Error on flexible byte mask\n");
1857 return IXGBE_ERR_CONFIG;
1861 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1862 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1864 /* store the TCP/UDP port masks, bit reversed from port layout */
1865 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1867 /* write both the same so that UDP and TCP use the same mask */
1868 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1869 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1871 /* store source and destination IP masks (big-endian) */
1872 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1873 ~input_mask->formatted.src_ip[0]);
1874 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1875 ~input_mask->formatted.dst_ip[0]);
1877 return IXGBE_SUCCESS;
1880 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1881 union ixgbe_atr_input *input,
1882 u16 soft_id, u8 queue, bool cloud_mode)
1884 u32 fdirport, fdirvlan, fdirhash, fdircmd;
1886 DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1888 /* currently IPv6 is not supported, must be programmed with 0 */
1889 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1890 input->formatted.src_ip[0]);
1891 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1892 input->formatted.src_ip[1]);
1893 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1894 input->formatted.src_ip[2]);
1896 /* record the source address (big-endian) */
1897 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1899 /* record the first 32 bits of the destination address (big-endian) */
1900 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1902 /* record source and destination port (little-endian)*/
1903 fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1904 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1905 fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1906 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1908 /* record VLAN (little-endian) and flex_bytes(big-endian) */
1909 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1910 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1911 fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1912 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1915 /* configure FDIRHASH register */
1916 fdirhash = input->formatted.bkt_hash;
1917 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1918 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1921 * flush all previous writes to make certain registers are
1922 * programmed prior to issuing the command
1924 IXGBE_WRITE_FLUSH(hw);
1926 /* configure FDIRCMD register */
1927 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1928 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1929 if (queue == IXGBE_FDIR_DROP_QUEUE)
1930 fdircmd |= IXGBE_FDIRCMD_DROP;
1931 if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
1932 fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1933 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1934 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1935 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1937 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1939 return IXGBE_SUCCESS;
1942 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1943 union ixgbe_atr_input *input,
1950 /* configure FDIRHASH register */
1951 fdirhash = input->formatted.bkt_hash;
1952 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1953 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1955 /* flush hash to HW */
1956 IXGBE_WRITE_FLUSH(hw);
1958 /* Query if filter is present */
1959 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1961 err = ixgbe_fdir_check_cmd_complete(hw);
1963 DEBUGOUT("Flow Director command did not complete!\n");
1967 /* if filter exists in hardware then remove it */
1968 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1969 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1970 IXGBE_WRITE_FLUSH(hw);
1971 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1972 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1975 return IXGBE_SUCCESS;
1979 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1980 * @hw: pointer to hardware structure
1981 * @input: input bitstream
1982 * @input_mask: mask for the input bitstream
1983 * @soft_id: software index for the filters
1984 * @queue: queue index to direct traffic to
1986 * Note that the caller to this function must lock before calling, since the
1987 * hardware writes must be protected from one another.
1989 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1990 union ixgbe_atr_input *input,
1991 union ixgbe_atr_input *input_mask,
1992 u16 soft_id, u8 queue, bool cloud_mode)
1994 s32 err = IXGBE_ERR_CONFIG;
1996 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1999 * Check flow_type formatting, and bail out before we touch the hardware
2000 * if there's a configuration issue
2002 switch (input->formatted.flow_type) {
2003 case IXGBE_ATR_FLOW_TYPE_IPV4:
2004 case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
2005 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
2006 if (input->formatted.dst_port || input->formatted.src_port) {
2007 DEBUGOUT(" Error on src/dst port\n");
2008 return IXGBE_ERR_CONFIG;
2011 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2012 case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
2013 if (input->formatted.dst_port || input->formatted.src_port) {
2014 DEBUGOUT(" Error on src/dst port\n");
2015 return IXGBE_ERR_CONFIG;
2017 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2018 case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
2019 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2020 case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
2021 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2022 IXGBE_ATR_L4TYPE_MASK;
2025 DEBUGOUT(" Error on flow type input\n");
2029 /* program input mask into the HW */
2030 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
2034 /* apply mask and compute/store hash */
2035 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
2037 /* program filters to filter memory */
2038 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2039 soft_id, queue, cloud_mode);
2043 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2044 * @hw: pointer to hardware structure
2045 * @reg: analog register to read
2048 * Performs read operation to Omer analog register specified.
2050 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2054 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2056 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2058 IXGBE_WRITE_FLUSH(hw);
2060 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2061 *val = (u8)core_ctl;
2063 return IXGBE_SUCCESS;
2067 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2068 * @hw: pointer to hardware structure
2069 * @reg: atlas register to write
2070 * @val: value to write
2072 * Performs write operation to Omer analog register specified.
2074 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2078 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2080 core_ctl = (reg << 8) | val;
2081 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2082 IXGBE_WRITE_FLUSH(hw);
2085 return IXGBE_SUCCESS;
2089 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2090 * @hw: pointer to hardware structure
2092 * Starts the hardware using the generic start_hw function
2093 * and the generation start_hw function.
2094 * Then performs revision-specific operations, if any.
2096 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2098 s32 ret_val = IXGBE_SUCCESS;
2100 DEBUGFUNC("ixgbe_start_hw_82599");
2102 ret_val = ixgbe_start_hw_generic(hw);
2103 if (ret_val != IXGBE_SUCCESS)
2106 ret_val = ixgbe_start_hw_gen2(hw);
2107 if (ret_val != IXGBE_SUCCESS)
2110 /* We need to run link autotry after the driver loads */
2111 hw->mac.autotry_restart = true;
2113 if (ret_val == IXGBE_SUCCESS)
2114 ret_val = ixgbe_verify_fw_version_82599(hw);
2120 * ixgbe_identify_phy_82599 - Get physical layer module
2121 * @hw: pointer to hardware structure
2123 * Determines the physical layer module found on the current adapter.
2124 * If PHY already detected, maintains current PHY type in hw struct,
2125 * otherwise executes the PHY detection routine.
2127 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2131 DEBUGFUNC("ixgbe_identify_phy_82599");
2133 /* Detect PHY if not unknown - returns success if already detected. */
2134 status = ixgbe_identify_phy_generic(hw);
2135 if (status != IXGBE_SUCCESS) {
2136 /* 82599 10GBASE-T requires an external PHY */
2137 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2140 status = ixgbe_identify_module_generic(hw);
2143 /* Set PHY type none if no PHY detected */
2144 if (hw->phy.type == ixgbe_phy_unknown) {
2145 hw->phy.type = ixgbe_phy_none;
2146 return IXGBE_SUCCESS;
2149 /* Return error if SFP module has been detected but is not supported */
2150 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2151 return IXGBE_ERR_SFP_NOT_SUPPORTED;
2157 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2158 * @hw: pointer to hardware structure
2160 * Determines physical layer capabilities of the current configuration.
 * Returns a bitmask of IXGBE_PHYSICAL_LAYER_* flags.
2162 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2164 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2165 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2166 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	/* Decode the PMA/PMD sub-fields of AUTOC/AUTOC2 once up front */
2167 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2168 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2169 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2170 u16 ext_ability = 0;
	/* NOTE(review): trace string says "support" while the function is
	 * named "supported" - harmless mismatch, consider aligning. */
2172 DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
	/* Re-run PHY identification: a pluggable module may have changed */
2174 hw->phy.ops.identify(hw);
2176 switch (hw->phy.type) {
2178 case ixgbe_phy_cu_unknown:
	/* External copper PHY: read its MDIO PMA/PMD extended-ability
	 * register and translate each ability bit to a layer flag */
2179 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2180 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2181 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2182 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2183 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2184 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2185 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2186 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
	/* No external copper PHY: derive capabilities from the link mode
	 * select (LMS) field plus the PMA/PMD sub-fields decoded above */
2192 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2193 case IXGBE_AUTOC_LMS_1G_AN:
2194 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2195 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2196 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2197 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2200 /* SFI mode so read SFP module */
2203 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
	/* 10G parallel interfaces are mutually exclusive */
2204 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2205 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2206 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2207 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2208 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2209 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2212 case IXGBE_AUTOC_LMS_10G_SERIAL:
2213 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2214 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2216 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
	/* KX/KX4/KR modes: report every layer whose support bit is set */
2219 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2220 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2221 if (autoc & IXGBE_AUTOC_KX_SUPP)
2222 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2223 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2224 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2225 if (autoc & IXGBE_AUTOC_KR_SUPP)
2226 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2235 /* SFP check must be done last since DA modules are sometimes used to
2236 * test KR mode - we need to id KR mode correctly before SFP module.
2237 * Call identify_sfp because the pluggable module may have changed */
2238 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2240 return physical_layer;
2244 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2245 * @hw: pointer to hardware structure
2246 * @regval: register value to write to RXCTRL
2248 * Enables the Rx DMA unit for 82599
 * Always returns IXGBE_SUCCESS.
2250 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2253 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2256 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2257 * If traffic is incoming before we enable the Rx unit, it could hang
2258 * the Rx DMA unit. Therefore, make sure the security engine is
2259 * completely disabled prior to enabling the Rx unit.
2262 hw->mac.ops.disable_sec_rx_path(hw);
	/* Honor the caller's requested RXEN state (else-branch disables Rx;
	 * the "else" keyword line is elided from this view) */
2264 if (regval & IXGBE_RXCTRL_RXEN)
2265 ixgbe_enable_rx(hw);
2267 ixgbe_disable_rx(hw);
	/* Re-enable the security engine once Rx state is settled */
2269 hw->mac.ops.enable_sec_rx_path(hw);
2271 return IXGBE_SUCCESS;
2275 * ixgbe_verify_fw_version_82599 - verify FW version for 82599
2276 * @hw: pointer to hardware structure
2278 * Verifies that the installed firmware version is 0.6 or higher
2279 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2281 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2282 * if the FW version is not supported.
2284 STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2286 s32 status = IXGBE_ERR_EEPROM_VERSION;
2287 u16 fw_offset, fw_ptp_cfg_offset;
2290 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2292 /* firmware check is only necessary for SFI devices */
2293 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2294 status = IXGBE_SUCCESS;
2295 goto fw_version_out;
2298 /* get the offset to the Firmware Module block */
2299 if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
2300 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2301 "eeprom read at offset %d failed", IXGBE_FW_PTR);
2302 return IXGBE_ERR_EEPROM_VERSION;
	/* 0 / 0xFFFF indicate an unprogrammed pointer or blank EEPROM */
2305 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2306 goto fw_version_out;
2308 /* get the offset to the Pass Through Patch Configuration block */
2309 if (hw->eeprom.ops.read(hw, (fw_offset +
2310 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2311 &fw_ptp_cfg_offset)) {
2312 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2313 "eeprom read at offset %d failed",
2315 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
2316 return IXGBE_ERR_EEPROM_VERSION;
2319 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2320 goto fw_version_out;
2322 /* get the firmware version */
2323 if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2324 IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
2325 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2326 "eeprom read at offset %d failed",
2327 fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
2328 return IXGBE_ERR_EEPROM_VERSION;
	/* version word > 0x5 corresponds to FW 0.6 or later */
2331 if (fw_version > 0x5)
2332 status = IXGBE_SUCCESS;
2339 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2340 * @hw: pointer to hardware structure
2342 * Returns true if the LESM FW module is present and enabled. Otherwise
2343 * returns false. Smart Speed must be disabled if LESM FW module is enabled.
2345 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2347 bool lesm_enabled = false;
2348 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2351 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2353 /* get the offset to the Firmware Module block */
2354 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
	/* 0 / 0xFFFF indicate an unprogrammed pointer or blank EEPROM;
	 * all failure paths fall through to return false */
2356 if ((status != IXGBE_SUCCESS) ||
2357 (fw_offset == 0) || (fw_offset == 0xFFFF))
2360 /* get the offset to the LESM Parameters block */
2361 status = hw->eeprom.ops.read(hw, (fw_offset +
2362 IXGBE_FW_LESM_PARAMETERS_PTR),
2363 &fw_lesm_param_offset);
2365 if ((status != IXGBE_SUCCESS) ||
2366 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2369 /* get the LESM state word */
2370 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2371 IXGBE_FW_LESM_STATE_1),
	/* Report enabled only if the read succeeded and the bit is set */
2374 if ((status == IXGBE_SUCCESS) &&
2375 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2376 lesm_enabled = true;
2379 return lesm_enabled;
2383 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2384 * fastest available method
2386 * @hw: pointer to hardware structure
2387 * @offset: offset of word in EEPROM to read
2388 * @words: number of words
2389 * @data: word(s) read from the EEPROM
2391 * Retrieves 16 bit word(s) read from EEPROM
2393 STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2394 u16 words, u16 *data)
2396 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2397 s32 ret_val = IXGBE_ERR_CONFIG;
2399 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2402 * If EEPROM is detected and can be addressed using 14 bits,
2403 * use EERD otherwise use bit bang
	/* EERD is the HW-assisted fast path; the entire requested range
	 * (offset .. offset+words-1) must fit under IXGBE_EERD_MAX_ADDR */
2405 if ((eeprom->type == ixgbe_eeprom_spi) &&
2406 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2407 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
	/* Fallback: slower bit-banged SPI access */
2410 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2418 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2419 * fastest available method
2421 * @hw: pointer to hardware structure
2422 * @offset: offset of word in the EEPROM to read
2423 * @data: word read from the EEPROM
2425 * Reads a 16 bit word from the EEPROM
2427 STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2428 u16 offset, u16 *data)
2430 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2431 s32 ret_val = IXGBE_ERR_CONFIG;
2433 DEBUGFUNC("ixgbe_read_eeprom_82599");
2436 * If EEPROM is detected and can be addressed using 14 bits,
2437 * use EERD otherwise use bit bang
	/* Single-word variant of the buffer routine above: EERD fast path
	 * when the offset is addressable, bit-bang fallback otherwise */
2439 if ((eeprom->type == ixgbe_eeprom_spi) &&
2440 (offset <= IXGBE_EERD_MAX_ADDR))
2441 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2443 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2449 * ixgbe_reset_pipeline_82599 - perform pipeline reset
2451 * @hw: pointer to hardware structure
2453 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2454 * full pipeline reset. This function assumes the SW/FW lock is held.
 * Returns IXGBE_SUCCESS, or IXGBE_ERR_RESET_FAILED if AN never leaves
 * state 0.
2456 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2460 u32 i, autoc_reg, autoc2_reg;
2462 /* Enable link if disabled in NVM */
2463 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2464 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2465 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2466 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2467 IXGBE_WRITE_FLUSH(hw);
2470 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2471 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2472 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2473 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2474 autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2475 /* Wait for AN to leave state 0 */
	/* Poll ANLP1 up to 10 times; the per-iteration delay between polls
	 * is elided from this view */
2476 for (i = 0; i < 10; i++) {
2478 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2479 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
	/* Timed out: AN state field never became non-zero */
2483 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2484 DEBUGOUT("auto negotiation not completed\n");
2485 ret_val = IXGBE_ERR_RESET_FAILED;
2486 goto reset_pipeline_out;
2489 ret_val = IXGBE_SUCCESS;
2492 /* Write AUTOC register with original LMS field and Restart_AN */
2493 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2494 IXGBE_WRITE_FLUSH(hw);
2501 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
2502 * @hw: pointer to hardware structure
2503 * @byte_offset: byte offset to read
 * @dev_addr: I2C bus address of the device to read from
 * @data: value read
2506 * Performs byte read operation to SFP module's EEPROM over I2C interface at
2507 * a specified device address.
2509 STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2510 u8 dev_addr, u8 *data)
2516 DEBUGFUNC("ixgbe_read_i2c_byte_82599");
	/* On shared-bus QSFP designs, bus ownership is signaled via the
	 * SDP0 (our request) and SDP1 (other agent busy) GPIO pins.
	 * NOTE(review): the other agent is presumably firmware - confirm. */
2518 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2519 /* Acquire I2C bus ownership. */
2520 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2521 esdp |= IXGBE_ESDP_SDP0;
2522 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2523 IXGBE_WRITE_FLUSH(hw);
	/* SDP1 still set means the bus was not granted; the wait/retry
	 * logic between the check and the timeout below is elided */
2526 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2527 if (esdp & IXGBE_ESDP_SDP1)
2535 DEBUGOUT("Driver can't access resource,"
2536 " acquiring I2C bus timeout.\n");
2537 status = IXGBE_ERR_I2C;
2538 goto release_i2c_access;
	/* Bus owned (or not shared): do the actual byte read */
2542 status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2546 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2547 /* Release I2C bus ownership. */
2548 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2549 esdp &= ~IXGBE_ESDP_SDP0;
2550 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2551 IXGBE_WRITE_FLUSH(hw);
2558 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
2559 * @hw: pointer to hardware structure
2560 * @byte_offset: byte offset to write
2561 * @data: value to write
2563 * Performs byte write operation to SFP module's EEPROM over I2C interface at
2564 * a specified device address.
2566 STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2567 u8 dev_addr, u8 data)
2573 DEBUGFUNC("ixgbe_write_i2c_byte_82599");
2575 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2576 /* Acquire I2C bus ownership. */
2577 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2578 esdp |= IXGBE_ESDP_SDP0;
2579 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2580 IXGBE_WRITE_FLUSH(hw);
2583 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2584 if (esdp & IXGBE_ESDP_SDP1)
2592 DEBUGOUT("Driver can't access resource,"
2593 " acquiring I2C bus timeout.\n");
2594 status = IXGBE_ERR_I2C;
2595 goto release_i2c_access;
2599 status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2603 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2604 /* Release I2C bus ownership. */
2605 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2606 esdp &= ~IXGBE_ESDP_SDP0;
2607 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2608 IXGBE_WRITE_FLUSH(hw);