1 /*******************************************************************************
3 Copyright (c) 2001-2014, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "ixgbe_type.h"
35 #include "ixgbe_82599.h"
36 #include "ixgbe_api.h"
37 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h"
39 #ident "$Id: ixgbe_82599.c,v 1.334 2013/12/04 22:34:00 jtkirshe Exp $"
/* 82599 device limits: Tx/Rx queue counts, receive-address (RAR) entries,
 * multicast and VLAN filter table sizes, and Rx packet-buffer size.
 * These are consumed in ixgbe_init_ops_82599() below.
 */
41 #define IXGBE_82599_MAX_TX_QUEUES 128
42 #define IXGBE_82599_MAX_RX_QUEUES 128
43 #define IXGBE_82599_RAR_ENTRIES 128
44 #define IXGBE_82599_MC_TBL_SIZE 128
45 #define IXGBE_82599_VFT_TBL_SIZE 128
46 #define IXGBE_82599_RX_PB_SIZE 512
/* Forward declarations of file-local (STATIC) helpers referenced by the
 * ops tables installed in ixgbe_init_ops_82599()/ixgbe_init_phy_ops_82599().
 */
48 STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
49 ixgbe_link_speed speed,
50 bool autoneg_wait_to_complete);
51 STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
52 STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
53 u16 offset, u16 *data);
54 STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
55 u16 words, u16 *data);
56 STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
57 u8 dev_addr, u8 *data);
58 STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
59 u8 dev_addr, u8 data);
/* ixgbe_init_mac_link_ops_82599 - install 82599-specific MAC link ops.
 * Fiber parts not under manageability-FW control get Tx-laser controls;
 * setup_link is then chosen: multispeed-fiber, SmartSpeed (backplane with
 * SmartSpeed on/auto and LESM FW disabled), or the plain 82599 path.
 * NOTE(review): per the embedded original line numbers, brace-only and
 * `} else {` lines are absent from this listing - do not compile as-is.
 */
61 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
63 struct ixgbe_mac_info *mac = &hw->mac;
65 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
68 * enable the laser control functions for SFP+ fiber
71 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
72 !ixgbe_mng_enabled(hw)) {
73 mac->ops.disable_tx_laser =
74 &ixgbe_disable_tx_laser_multispeed_fiber;
75 mac->ops.enable_tx_laser =
76 &ixgbe_enable_tx_laser_multispeed_fiber;
77 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
/* else branch (non-fiber media or MNG FW enabled): no laser control */
80 mac->ops.disable_tx_laser = NULL;
81 mac->ops.enable_tx_laser = NULL;
82 mac->ops.flap_tx_laser = NULL;
/* Select the link-setup routine based on PHY/media capabilities */
85 if (hw->phy.multispeed_fiber) {
86 /* Set up dual speed SFP+ support */
87 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
89 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
90 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
91 hw->phy.smart_speed == ixgbe_smart_speed_on) &&
92 !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
93 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
/* default (else) case: standard 82599 link setup */
95 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
101 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
102 * @hw: pointer to hardware structure
104 * Initialize any function pointers that were not able to be
105 * set during init_shared_code because the PHY/SFP type was
106 * not known. Perform the SFP init if necessary.
/* NOTE(review): this listing omits the local `esdp` declaration used
 * below, the `case` label before the TNX PHY assignments, and the tail
 * of the function (per the embedded line-number gaps) - confirm against
 * the original source before building.
 */
109 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
111 struct ixgbe_mac_info *mac = &hw->mac;
112 struct ixgbe_phy_info *phy = &hw->phy;
113 s32 ret_val = IXGBE_SUCCESS;
116 DEBUGFUNC("ixgbe_init_phy_ops_82599");
/* QSFP+ parts share the I2C bus with another control unit; configure
 * the SDP pins so software can drive the shared bus.
 */
118 if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
119 /* Store flag indicating I2C bus access control unit. */
120 hw->phy.qsfp_shared_i2c_bus = TRUE;
122 /* Initialize access to QSFP+ I2C bus */
123 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
124 esdp |= IXGBE_ESDP_SDP0_DIR;
125 esdp &= ~IXGBE_ESDP_SDP1_DIR;
126 esdp &= ~IXGBE_ESDP_SDP0;
127 esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
128 esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
129 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
130 IXGBE_WRITE_FLUSH(hw);
/* Route I2C byte access through the 82599 QSFP-aware helpers */
132 phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
133 phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
135 /* Identify the PHY or SFP module */
136 ret_val = phy->ops.identify(hw);
137 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
138 goto init_phy_ops_out;
140 /* Setup function pointers based on detected SFP module and speeds */
141 ixgbe_init_mac_link_ops_82599(hw);
142 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
143 hw->phy.ops.reset = NULL;
145 /* If copper media, overwrite with copper function pointers */
146 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
147 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
148 mac->ops.get_link_capabilities =
149 &ixgbe_get_copper_link_capabilities_generic;
152 /* Set necessary function pointers based on PHY type */
153 switch (hw->phy.type) {
/* TNX PHY: use its dedicated link-setup/check/firmware helpers
 * (the `case` label itself is missing from this listing).
 */
155 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
156 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
157 phy->ops.get_firmware_version =
158 &ixgbe_get_phy_firmware_version_tnx;
/* ixgbe_setup_sfp_modules_82599 - run the EEPROM-provided SFP init
 * sequence: write each EEPROM word to CORECTL under the MAC CSR SW/FW
 * semaphore, then restart the DSP in SFI mode via prot_autoc_write().
 * Returns IXGBE_SUCCESS, IXGBE_ERR_SWFW_SYNC, IXGBE_ERR_PHY, or
 * IXGBE_ERR_SFP_SETUP_NOT_COMPLETE.
 * NOTE(review): the goto labels and error-exit structure (original
 * lines ~216-224) are absent from this listing; the trailing lines
 * below are the EEPROM-read error path.
 */
167 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
169 s32 ret_val = IXGBE_SUCCESS;
170 u16 list_offset, data_offset, data_value;
172 DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
174 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
175 ixgbe_init_mac_link_ops_82599(hw);
177 hw->phy.ops.reset = NULL;
179 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
181 if (ret_val != IXGBE_SUCCESS)
184 /* PHY config will finish before releasing the semaphore */
185 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
186 IXGBE_GSSR_MAC_CSR_SM);
187 if (ret_val != IXGBE_SUCCESS) {
188 ret_val = IXGBE_ERR_SWFW_SYNC;
/* Stream the init sequence from EEPROM to CORECTL until the 0xffff
 * terminator word is reached.
 */
192 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
194 while (data_value != 0xffff) {
195 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
196 IXGBE_WRITE_FLUSH(hw);
197 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
201 /* Release the semaphore */
202 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
203 /* Delay obtaining semaphore again to allow FW access
204 * prot_autoc_write uses the semaphore too.
206 msec_delay(hw->eeprom.semaphore_delay);
208 /* Restart DSP and set SFI mode */
209 ret_val = hw->mac.ops.prot_autoc_write(hw,
210 hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
214 DEBUGOUT("sfp module setup not complete\n");
215 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
/* EEPROM-read failure path: drop the semaphore, give FW a window,
 * report the failing offset, and return a PHY error.
 */
225 /* Release the semaphore */
226 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
227 /* Delay obtaining semaphore again to allow FW access */
228 msec_delay(hw->eeprom.semaphore_delay);
229 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
230 "eeprom read at offset %d failed", data_offset);
231 return IXGBE_ERR_PHY;
235 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
236 * @hw: pointer to hardware structure
237 * @locked: Return the if we locked for this read.
238 * @reg_val: Value we read from AUTOC
240 * For this part (82599) we need to wrap read-modify-writes with a possible
241 * FW/SW lock. It is assumed this lock will be freed with the next
242 * prot_autoc_write_82599().
/* NOTE(review): the declaration of `ret_val` and the assignments to
 * `*locked` (original lines ~246-257) are absent from this listing.
 */
244 s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
249 /* If LESM is on then we need to hold the SW/FW semaphore. */
250 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
251 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
252 IXGBE_GSSR_MAC_CSR_SM);
253 if (ret_val != IXGBE_SUCCESS)
254 return IXGBE_ERR_SWFW_SYNC;
/* Read AUTOC while (possibly) holding the MAC CSR semaphore */
259 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
260 return IXGBE_SUCCESS;
264 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
265 * @hw: pointer to hardware structure
266 * @reg_val: value to write to AUTOC
267 * @locked: bool to indicate whether the SW/FW lock was already taken by
268 * previous proc_autoc_read_82599.
270 * This part (82599) may need to hold the SW/FW lock around all writes to
271 * AUTOC. Likewise after a write we need to do a pipeline reset.
273 s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
275 s32 ret_val = IXGBE_SUCCESS;
277 /* Blocked by MNG FW so bail */
278 if (ixgbe_check_reset_blocked(hw))
281 /* We only need to get the lock if:
282 * - We didn't do it already (in the read part of a read-modify-write)
/* Acquire the MAC CSR semaphore only when LESM FW is active and the
 * caller has not already taken it via prot_autoc_read_82599().
 */
285 if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
286 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
287 IXGBE_GSSR_MAC_CSR_SM);
288 if (ret_val != IXGBE_SUCCESS)
289 return IXGBE_ERR_SWFW_SYNC;
/* Commit AUTOC, then reset the Tx/Rx pipeline so it takes effect */
294 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
295 ret_val = ixgbe_reset_pipeline_82599(hw);
298 /* Free the SW/FW semaphore as we either grabbed it here or
299 * already had it when this function was called.
302 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
308 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
309 * @hw: pointer to hardware structure
311 * Initialize the function pointers and assign the MAC type for 82599.
312 * Does not touch the hardware.
315 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
317 struct ixgbe_mac_info *mac = &hw->mac;
318 struct ixgbe_phy_info *phy = &hw->phy;
319 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
322 DEBUGFUNC("ixgbe_init_ops_82599");
/* Start from the generic ops, then override with 82599 specifics */
324 ixgbe_init_phy_ops_generic(hw);
325 ret_val = ixgbe_init_ops_generic(hw);
/* PHY */
328 phy->ops.identify = &ixgbe_identify_phy_82599;
329 phy->ops.init = &ixgbe_init_phy_ops_82599;
/* MAC */
332 mac->ops.reset_hw = &ixgbe_reset_hw_82599;
333 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
334 mac->ops.get_media_type = &ixgbe_get_media_type_82599;
335 mac->ops.get_supported_physical_layer =
336 &ixgbe_get_supported_physical_layer_82599;
337 mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
338 mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
339 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
340 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
341 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
342 mac->ops.start_hw = &ixgbe_start_hw_82599;
343 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
344 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
345 mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
346 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
347 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
348 mac->ops.prot_autoc_read = &prot_autoc_read_82599;
349 mac->ops.prot_autoc_write = &prot_autoc_write_82599;
351 /* RAR, Multicast, VLAN */
352 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
353 mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
354 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
355 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
356 mac->rar_highwater = 1;
357 mac->ops.set_vfta = &ixgbe_set_vfta_generic;
358 mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
359 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
360 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
361 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
362 mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
363 mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
/* Link */
366 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
367 mac->ops.check_link = &ixgbe_check_mac_link_generic;
368 mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
369 ixgbe_init_mac_link_ops_82599(hw);
/* Device capacities from the IXGBE_82599_* limits above */
371 mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
372 mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
373 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
374 mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
375 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
376 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
377 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/* ARC subsystem is valid only when FW manageability mode is set */
379 mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
380 IXGBE_FWSM_MODE_MASK) ? true : false;
382 hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
/* EEPROM */
385 eeprom->ops.read = &ixgbe_read_eeprom_82599;
386 eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
388 /* Manageability interface */
389 mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
392 mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
398 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
399 * @hw: pointer to hardware structure
400 * @speed: pointer to link speed
401 * @autoneg: true when autoneg or autotry is enabled
403 * Determines the link capabilities by reading the AUTOC register.
/* NOTE(review): the local `autoc` declaration, the `*autoneg`
 * assignments, `break;` lines and the function exit (per the embedded
 * line-number gaps) are absent from this listing.
 */
405 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
406 ixgbe_link_speed *speed,
409 s32 status = IXGBE_SUCCESS;
412 DEBUGFUNC("ixgbe_get_link_capabilities_82599");
415 /* Check if 1G SFP module. */
416 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
417 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
418 #ifdef SUPPORT_1000BASE_LX
419 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
420 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
422 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
423 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
424 *speed = IXGBE_LINK_SPEED_1GB_FULL;
430 * Determine link capabilities based on the stored value of AUTOC,
431 * which represents EEPROM defaults. If AUTOC value has not
432 * been stored, use the current register values.
434 if (hw->mac.orig_link_settings_stored)
435 autoc = hw->mac.orig_autoc;
437 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
/* Map the Link Mode Select field to the supported speed set */
439 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
440 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
441 *speed = IXGBE_LINK_SPEED_1GB_FULL;
445 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
446 *speed = IXGBE_LINK_SPEED_10GB_FULL;
450 case IXGBE_AUTOC_LMS_1G_AN:
451 *speed = IXGBE_LINK_SPEED_1GB_FULL;
455 case IXGBE_AUTOC_LMS_10G_SERIAL:
456 *speed = IXGBE_LINK_SPEED_10GB_FULL;
460 case IXGBE_AUTOC_LMS_KX4_KX_KR:
461 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
462 *speed = IXGBE_LINK_SPEED_UNKNOWN;
/* Accumulate speeds from the advertised KR/KX4/KX support bits */
463 if (autoc & IXGBE_AUTOC_KR_SUPP)
464 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
465 if (autoc & IXGBE_AUTOC_KX4_SUPP)
466 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
467 if (autoc & IXGBE_AUTOC_KX_SUPP)
468 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
472 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
473 *speed = IXGBE_LINK_SPEED_100_FULL;
474 if (autoc & IXGBE_AUTOC_KR_SUPP)
475 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
476 if (autoc & IXGBE_AUTOC_KX4_SUPP)
477 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
478 if (autoc & IXGBE_AUTOC_KX_SUPP)
479 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
483 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
484 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
/* Unrecognized LMS value: report a link-setup error */
489 status = IXGBE_ERR_LINK_SETUP;
494 if (hw->phy.multispeed_fiber) {
495 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
496 IXGBE_LINK_SPEED_1GB_FULL;
498 /* QSFP must not enable full auto-negotiation
499 * Limited autoneg is enabled at 1G
501 if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
512 * ixgbe_get_media_type_82599 - Get media type
513 * @hw: pointer to hardware structure
515 * Returns the media type (fiber, copper, backplane)
/* NOTE(review): `break;` lines, some `case` labels and the return at the
 * end of this function are absent from this listing (embedded numbering
 * gaps).
 */
517 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
519 enum ixgbe_media_type media_type;
521 DEBUGFUNC("ixgbe_get_media_type_82599");
523 /* Detect if there is a copper PHY attached. */
524 switch (hw->phy.type) {
525 case ixgbe_phy_cu_unknown:
527 media_type = ixgbe_media_type_copper;
/* No copper PHY detected - classify by device ID instead */
533 switch (hw->device_id) {
534 case IXGBE_DEV_ID_82599_KX4:
535 case IXGBE_DEV_ID_82599_KX4_MEZZ:
536 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
537 case IXGBE_DEV_ID_82599_KR:
538 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
539 case IXGBE_DEV_ID_82599_XAUI_LOM:
540 /* Default device ID is mezzanine card KX/KX4 */
541 media_type = ixgbe_media_type_backplane;
543 case IXGBE_DEV_ID_82599_SFP:
544 case IXGBE_DEV_ID_82599_SFP_FCOE:
545 case IXGBE_DEV_ID_82599_SFP_EM:
546 case IXGBE_DEV_ID_82599_SFP_SF2:
547 case IXGBE_DEV_ID_82599_SFP_SF_QP:
548 case IXGBE_DEV_ID_82599EN_SFP:
549 media_type = ixgbe_media_type_fiber;
551 case IXGBE_DEV_ID_82599_CX4:
552 media_type = ixgbe_media_type_cx4;
554 case IXGBE_DEV_ID_82599_T3_LOM:
555 media_type = ixgbe_media_type_copper;
557 case IXGBE_DEV_ID_82599_LS:
558 media_type = ixgbe_media_type_fiber_lco;
560 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
561 media_type = ixgbe_media_type_fiber_qsfp;
/* default: unrecognized device ID */
564 media_type = ixgbe_media_type_unknown;
572 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
573 * @hw: pointer to hardware structure
575 * Disables link during D3 power down sequence.
/* NOTE(review): the declaration of `ee_ctrl_2` (read below) is absent
 * from this listing.
 */
578 void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
580 u32 autoc2_reg, fwsm;
583 DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
584 ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
586 /* Check to see if MNG FW could be enabled */
587 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
/* Only disable link on D3 when FW is not in pass-through mode and the
 * EEPROM CCD bit permits it.
 */
589 if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
591 ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
592 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
593 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
594 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
599 * ixgbe_start_mac_link_82599 - Setup MAC link settings
600 * @hw: pointer to hardware structure
601 * @autoneg_wait_to_complete: true when waiting for completion is needed
603 * Configures link settings based on values in the ixgbe_hw struct.
604 * Restarts the link. Performs autonegotiation if needed.
/* NOTE(review): declarations of `autoc_reg`, `links_reg`, `i` and
 * several brace/assignment lines are absent from this listing.
 */
606 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
607 bool autoneg_wait_to_complete)
612 s32 status = IXGBE_SUCCESS;
613 bool got_lock = false;
615 DEBUGFUNC("ixgbe_start_mac_link_82599");
618 /* reset_pipeline requires us to hold this lock as it writes to
621 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
622 status = hw->mac.ops.acquire_swfw_sync(hw,
623 IXGBE_GSSR_MAC_CSR_SM);
624 if (status != IXGBE_SUCCESS)
/* Restart the link via a pipeline reset, then drop the lock */
631 ixgbe_reset_pipeline_82599(hw);
634 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
636 /* Only poll for autoneg to complete if specified to do so */
637 if (autoneg_wait_to_complete) {
638 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
639 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
640 IXGBE_AUTOC_LMS_KX4_KX_KR ||
641 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
642 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
643 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
644 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
645 links_reg = 0; /* Just in case Autoneg time = 0 */
646 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
647 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
648 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
/* Timed out waiting for KX autoneg completion */
652 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
653 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
654 DEBUGOUT("Autoneg did not complete.\n");
659 /* Add delay to filter out noises during initial link setup */
667 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
668 * @hw: pointer to hardware structure
670 * The base drivers may require better control over SFP+ module
671 * PHY states. This includes selectively shutting down the Tx
672 * laser on the PHY, effectively halting physical link.
674 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
676 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
678 /* Blocked by MNG FW so bail */
679 if (ixgbe_check_reset_blocked(hw))
/* Setting SDP3 shuts the laser off */
682 /* Disable Tx laser; allow 100us to go dark per spec */
683 esdp_reg |= IXGBE_ESDP_SDP3;
684 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
685 IXGBE_WRITE_FLUSH(hw);
690 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
691 * @hw: pointer to hardware structure
693 * The base drivers may require better control over SFP+ module
694 * PHY states. This includes selectively turning on the Tx
695 * laser on the PHY, effectively starting physical link.
697 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
699 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
/* Clearing SDP3 turns the laser back on */
701 /* Enable Tx laser; allow 100ms to light up */
702 esdp_reg &= ~IXGBE_ESDP_SDP3;
703 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
704 IXGBE_WRITE_FLUSH(hw);
709 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
710 * @hw: pointer to hardware structure
712 * When the driver changes the link speeds that it can support,
713 * it sets autotry_restart to true to indicate that we need to
714 * initiate a new autotry session with the link partner. To do
715 * so, we set the speed then disable and re-enable the Tx laser, to
716 * alert the link partner that it also needs to restart autotry on its
717 * end. This is consistent with true clause 37 autoneg, which also
718 * involves a loss of signal.
720 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
722 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
724 /* Blocked by MNG FW so bail */
725 if (ixgbe_check_reset_blocked(hw))
/* Only flap when a restart was requested; clear the flag afterwards */
728 if (hw->mac.autotry_restart) {
729 ixgbe_disable_tx_laser_multispeed_fiber(hw);
730 ixgbe_enable_tx_laser_multispeed_fiber(hw);
731 hw->mac.autotry_restart = false;
737 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
738 * @hw: pointer to hardware structure
739 * @speed: new link speed
740 * @autoneg_wait_to_complete: true when waiting for completion is needed
742 * Set the link speed in the AUTOC register and restarts link.
/* NOTE(review): loop counters, `speedcnt`, several `goto out` /
 * `msec_delay` lines and the final return are absent from this listing
 * (embedded numbering gaps).
 */
744 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
745 ixgbe_link_speed speed,
746 bool autoneg_wait_to_complete)
748 s32 status = IXGBE_SUCCESS;
749 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
750 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
752 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
754 bool autoneg, link_up = false;
756 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
758 /* Mask off requested but non-supported speeds */
759 status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
760 if (status != IXGBE_SUCCESS)
766 * Try each speed one by one, highest priority first. We do this in
767 * software because 10gb fiber doesn't support speed autonegotiation.
/* --- 10G attempt --- */
769 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
771 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
773 /* If we already have link at this speed, just jump out */
774 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
775 if (status != IXGBE_SUCCESS)
778 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
781 /* Set the module link speed */
782 switch (hw->phy.media_type) {
783 case ixgbe_media_type_fiber:
784 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
785 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
786 IXGBE_WRITE_FLUSH(hw);
788 case ixgbe_media_type_fiber_qsfp:
789 /* QSFP module automatically detects MAC link speed */
792 DEBUGOUT("Unexpected media type.\n");
796 /* Allow module to change analog characteristics (1G->10G) */
799 status = ixgbe_setup_mac_link_82599(hw,
800 IXGBE_LINK_SPEED_10GB_FULL,
801 autoneg_wait_to_complete);
802 if (status != IXGBE_SUCCESS)
805 /* Flap the tx laser if it has not already been done */
806 ixgbe_flap_tx_laser(hw);
809 * Wait for the controller to acquire link. Per IEEE 802.3ap,
810 * Section 73.10.2, we may have to wait up to 500ms if KR is
811 * attempted. 82599 uses the same timing for 10g SFI.
813 for (i = 0; i < 5; i++) {
814 /* Wait for the link partner to also set speed */
817 /* If we have link, just jump out */
818 status = ixgbe_check_link(hw, &link_speed,
820 if (status != IXGBE_SUCCESS)
/* --- 1G attempt --- */
828 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
830 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
831 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
833 /* If we already have link at this speed, just jump out */
834 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
835 if (status != IXGBE_SUCCESS)
838 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
841 /* Set the module link speed */
842 switch (hw->phy.media_type) {
843 case ixgbe_media_type_fiber:
844 esdp_reg &= ~IXGBE_ESDP_SDP5;
845 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
846 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
847 IXGBE_WRITE_FLUSH(hw);
849 case ixgbe_media_type_fiber_qsfp:
850 /* QSFP module automatically detects link speed */
853 DEBUGOUT("Unexpected media type.\n");
857 /* Allow module to change analog characteristics (10G->1G) */
860 status = ixgbe_setup_mac_link_82599(hw,
861 IXGBE_LINK_SPEED_1GB_FULL,
862 autoneg_wait_to_complete);
863 if (status != IXGBE_SUCCESS)
866 /* Flap the Tx laser if it has not already been done */
867 ixgbe_flap_tx_laser(hw);
869 /* Wait for the link partner to also set speed */
872 /* If we have link, just jump out */
873 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
874 if (status != IXGBE_SUCCESS)
882 * We didn't get link. Configure back to the highest speed we tried,
883 * (if there was more than one). We call ourselves back with just the
884 * single highest speed that the user requested.
/* Recursive retry at the single highest previously-attempted speed */
887 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
888 highest_link_speed, autoneg_wait_to_complete);
891 /* Set autoneg_advertised value based on input link speed */
892 hw->phy.autoneg_advertised = 0;
894 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
895 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
897 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
898 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
904 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
905 * @hw: pointer to hardware structure
906 * @speed: new link speed
907 * @autoneg_wait_to_complete: true when waiting for completion is needed
909 * Implements the Intel SmartSpeed algorithm.
/* NOTE(review): loop counters `i`/`j`, `msec_delay` calls, braces and
 * the `out:` label / return are absent from this listing.
 */
911 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
912 ixgbe_link_speed speed,
913 bool autoneg_wait_to_complete)
915 s32 status = IXGBE_SUCCESS;
916 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
918 bool link_up = false;
919 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
921 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
923 /* Set autoneg_advertised value based on input link speed */
924 hw->phy.autoneg_advertised = 0;
926 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
927 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
929 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
930 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
932 if (speed & IXGBE_LINK_SPEED_100_FULL)
933 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
936 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
937 * autoneg advertisement if link is unable to be established at the
938 * highest negotiated rate. This can sometimes happen due to integrity
939 * issues with the physical media connection.
942 /* First, try to get link with full advertisement */
943 hw->phy.smart_speed_active = false;
944 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
945 status = ixgbe_setup_mac_link_82599(hw, speed,
946 autoneg_wait_to_complete);
947 if (status != IXGBE_SUCCESS)
951 * Wait for the controller to acquire link. Per IEEE 802.3ap,
952 * Section 73.10.2, we may have to wait up to 500ms if KR is
953 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
954 * Table 9 in the AN MAS.
956 for (i = 0; i < 5; i++) {
959 /* If we have link, just jump out */
960 status = ixgbe_check_link(hw, &link_speed, &link_up,
962 if (status != IXGBE_SUCCESS)
971 * We didn't get link. If we advertised KR plus one of KX4/KX
972 * (or BX4/BX), then disable KR and try again.
/* If KR was not advertised alongside KX4/KX, SmartSpeed cannot help */
974 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
975 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
978 /* Turn SmartSpeed on to disable KR support */
979 hw->phy.smart_speed_active = true;
980 status = ixgbe_setup_mac_link_82599(hw, speed,
981 autoneg_wait_to_complete);
982 if (status != IXGBE_SUCCESS)
986 * Wait for the controller to acquire link. 600ms will allow for
987 * the AN link_fail_inhibit_timer as well for multiple cycles of
988 * parallel detect, both 10g and 1g. This allows for the maximum
989 * connect attempts as defined in the AN MAS table 73-7.
991 for (i = 0; i < 6; i++) {
994 /* If we have link, just jump out */
995 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
996 if (status != IXGBE_SUCCESS)
1003 /* We didn't get link. Turn SmartSpeed back off. */
1004 hw->phy.smart_speed_active = false;
1005 status = ixgbe_setup_mac_link_82599(hw, speed,
1006 autoneg_wait_to_complete);
/* Warn when SmartSpeed settled below the maximum advertised rate */
1009 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
1010 DEBUGOUT("Smartspeed has downgraded the link speed "
1011 "from the maximum advertised\n");
1016 * ixgbe_setup_mac_link_82599 - Set MAC link speed
1017 * @hw: pointer to hardware structure
1018 * @speed: new link speed
1019 * @autoneg_wait_to_complete: true when waiting for completion is needed
1021 * Set the link speed in the AUTOC register and restarts link.
/* NOTE(review): declarations of `links_reg`/`i`, several braces,
 * `goto out` lines and the function exit are absent from this listing.
 */
1023 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1024 ixgbe_link_speed speed,
1025 bool autoneg_wait_to_complete)
1027 bool autoneg = false;
1028 s32 status = IXGBE_SUCCESS;
1029 u32 pma_pmd_1g, link_mode;
1030 u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */
1031 u32 orig_autoc = 0; /* holds the cached value of AUTOC register */
1032 u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */
1033 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1034 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
1037 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
1039 DEBUGFUNC("ixgbe_setup_mac_link_82599");
1041 /* Check to see if speed passed in is supported. */
1042 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
1046 speed &= link_capabilities;
1048 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
1049 status = IXGBE_ERR_LINK_SETUP;
1053 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
1054 if (hw->mac.orig_link_settings_stored)
1055 orig_autoc = hw->mac.orig_autoc;
/* Decompose AUTOC into the link-mode-select and 1G PMA/PMD fields */
1059 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
1060 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1062 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
1063 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
1064 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
1065 /* Set KX4/KX/KR support according to speed requested */
1066 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
1067 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
1068 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
1069 autoc |= IXGBE_AUTOC_KX4_SUPP;
/* KR only re-enabled when SmartSpeed has not disabled it */
1070 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
1071 (hw->phy.smart_speed_active == false))
1072 autoc |= IXGBE_AUTOC_KR_SUPP;
1074 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
1075 autoc |= IXGBE_AUTOC_KX_SUPP;
1076 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
1077 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
1078 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
1079 /* Switch from 1G SFI to 10G SFI if requested */
1080 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
1081 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
1082 autoc &= ~IXGBE_AUTOC_LMS_MASK;
1083 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
1085 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
1086 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
1087 /* Switch from 10G SFI to 1G SFI if requested */
1088 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1089 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
1090 autoc &= ~IXGBE_AUTOC_LMS_MASK;
1091 if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel)
1092 autoc |= IXGBE_AUTOC_LMS_1G_AN;
1094 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
/* Only touch the hardware when the computed AUTOC actually changed */
1098 if (autoc != current_autoc) {
1100 status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
1101 if (status != IXGBE_SUCCESS)
1104 /* Only poll for autoneg to complete if specified to do so */
1105 if (autoneg_wait_to_complete) {
1106 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
1107 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
1108 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
1109 links_reg = 0; /*Just in case Autoneg time=0*/
1110 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
1112 IXGBE_READ_REG(hw, IXGBE_LINKS);
1113 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
/* Timed out waiting for KX autoneg completion */
1117 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
1119 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
1120 DEBUGOUT("Autoneg did not complete.\n");
1125 /* Add delay to filter out noises during initial link setup */
1134 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
1135 * @hw: pointer to hardware structure
1136 * @speed: new link speed
1137 * @autoneg_wait_to_complete: true if waiting is needed to complete
1139 * Restarts link on PHY and MAC based on settings passed in.
1141 STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1142 ixgbe_link_speed speed,
1143 bool autoneg_wait_to_complete)
1147 DEBUGFUNC("ixgbe_setup_copper_link_82599");
1149 /* Setup the PHY according to input speed */
1150 status = hw->phy.ops.setup_link_speed(hw, speed,
1151 autoneg_wait_to_complete);
/* MAC link start runs unconditionally after PHY setup; 'status' from the
 * PHY call is presumably what gets returned (return not in view). */
1153 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
1159 * ixgbe_reset_hw_82599 - Perform hardware reset
1160 * @hw: pointer to hardware structure
1162 * Resets the hardware by resetting the transmit and receive units, masks
1163 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
1166 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1168 ixgbe_link_speed link_speed;
1171 u32 i, autoc, autoc2;
1173 bool link_up = false;
1175 DEBUGFUNC("ixgbe_reset_hw_82599");
1177 /* Call adapter stop to disable tx/rx and clear interrupts */
1178 status = hw->mac.ops.stop_adapter(hw);
1179 if (status != IXGBE_SUCCESS)
1182 /* flush pending Tx transactions */
1183 ixgbe_clear_tx_pending(hw);
1185 /* PHY ops must be identified and initialized prior to reset */
1187 /* Identify PHY and related function pointers */
1188 status = hw->phy.ops.init(hw)\u003b
1190 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1193 /* Setup SFP module if there is one present. */
1194 if (hw->phy.sfp_setup_needed) {
/* setup_sfp is a one-shot operation: clear the flag even if it fails,
 * so an unsupported module error below is reported once. */
1195 status = hw->mac.ops.setup_sfp(hw);
1196 hw->phy.sfp_setup_needed = false;
1199 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1203 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
1204 hw->phy.ops.reset(hw);
1206 /* remember AUTOC from before we reset */
1207 curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
1211 * Issue global reset to the MAC. Needs to be SW reset if link is up.
1212 * If link reset is used when link is up, it might reset the PHY when
1213 * mng is using it. If link is down or the flag to force full link
1214 * reset is set, then perform link reset.
1216 ctrl = IXGBE_CTRL_LNK_RST;
1217 if (!hw->force_full_reset) {
1218 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
/* Downgrade to a SW reset when link is up (condition line not in view). */
1220 ctrl = IXGBE_CTRL_RST;
1223 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1224 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1225 IXGBE_WRITE_FLUSH(hw);
1227 /* Poll for reset bit to self-clear meaning reset is complete */
1228 for (i = 0; i < 10; i++) {
1230 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1231 if (!(ctrl & IXGBE_CTRL_RST_MASK))
1235 if (ctrl & IXGBE_CTRL_RST_MASK) {
1236 status = IXGBE_ERR_RESET_FAILED;
1237 DEBUGOUT("Reset polling failed to complete.\n");
1243 * Double resets are required for recovery from certain error
1244 * conditions. Between resets, it is necessary to stall to
1245 * allow time for any pending HW events to complete.
1247 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1248 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1253 * Store the original AUTOC/AUTOC2 values if they have not been
1254 * stored off yet. Otherwise restore the stored original
1255 * values since the reset operation sets back to defaults.
1257 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1258 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1260 /* Enable link if disabled in NVM */
1261 if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
1262 autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
1263 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1264 IXGBE_WRITE_FLUSH(hw);
1267 if (hw->mac.orig_link_settings_stored == false) {
/* First reset after load: capture EEPROM-default AUTOC/AUTOC2. */
1268 hw->mac.orig_autoc = autoc;
1269 hw->mac.orig_autoc2 = autoc2;
1270 hw->mac.orig_link_settings_stored = true;
1273 /* If MNG FW is running on a multi-speed device that
1274 * doesn't autoneg with out driver support we need to
1275 * leave LMS in the state it was before we MAC reset.
1276 * Likewise if we support WoL we don't want change the
1279 if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
/* Preserve the pre-reset LMS field (curr_lms, saved above) inside the
 * stored original AUTOC. */
1281 hw->mac.orig_autoc =
1282 (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
1285 if (autoc != hw->mac.orig_autoc) {
1286 status = hw->mac.ops.prot_autoc_write(hw,
1289 if (status != IXGBE_SUCCESS)
/* AUTOC2: only the "upper" bits are restored; low bits keep their
 * post-reset values. */
1293 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1294 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1295 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1296 autoc2 |= (hw->mac.orig_autoc2 &
1297 IXGBE_AUTOC2_UPPER_MASK);
1298 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1302 /* Store the permanent mac address */
1303 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1306 * Store MAC address from RAR0, clear receive address registers, and
1307 * clear the multicast table. Also reset num_rar_entries to 128,
1308 * since we modify this value when programming the SAN MAC address.
1310 hw->mac.num_rar_entries = 128;
1311 hw->mac.ops.init_rx_addrs(hw);
1313 /* Store the permanent SAN mac address */
1314 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1316 /* Add the SAN MAC address to the RAR only if it's a valid address */
1317 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1318 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1319 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1321 /* Save the SAN MAC RAR index */
1322 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1324 /* Reserve the last RAR for the SAN MAC address */
1325 hw->mac.num_rar_entries--;
1328 /* Store the alternative WWNN/WWPN prefix */
1329 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1330 &hw->mac.wwpn_prefix);
1337 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
1338 * @hw: pointer to hardware structure
/* Returns IXGBE_SUCCESS once FDIRCMD.CMD reads back as zero, or
 * IXGBE_ERR_FDIR_CMD_INCOMPLETE after IXGBE_FDIRCMD_CMD_POLL attempts.
 * NOTE(review): the per-iteration delay line is not in view here. */
1340 STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw)
1344 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1345 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1346 IXGBE_FDIRCMD_CMD_MASK))
1347 return IXGBE_SUCCESS;
1351 return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
1355 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1356 * @hw: pointer to hardware structure
/* Re-runs the FDIR init flow after clearing filter state. The write
 * sequence below is order-sensitive (silicon errata workaround); do not
 * reorder the register writes/flushes. */
1358 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1362 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1363 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1365 DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1368 * Before starting reinitialization process,
1369 * FDIRCMD.CMD must be zero.
1371 err = ixgbe_fdir_check_cmd_complete(hw);
1373 DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
1377 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1378 IXGBE_WRITE_FLUSH(hw);
1380 * 82599 adapters flow director init flow cannot be restarted,
1381 * Workaround 82599 silicon errata by performing the following steps
1382 * before re-writing the FDIRCTRL control register with the same value.
1383 * - write 1 to bit 8 of FDIRCMD register &
1384 * - write 0 to bit 8 of FDIRCMD register
1386 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1387 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1388 IXGBE_FDIRCMD_CLEARHT));
1389 IXGBE_WRITE_FLUSH(hw);
1390 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1391 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1392 ~IXGBE_FDIRCMD_CLEARHT));
1393 IXGBE_WRITE_FLUSH(hw);
1395 * Clear FDIR Hash register to clear any leftover hashes
1396 * waiting to be programmed.
1398 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1399 IXGBE_WRITE_FLUSH(hw);
/* Re-arm init: FDIRCTRL was read with INIT_DONE cleared above. */
1401 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1402 IXGBE_WRITE_FLUSH(hw);
1404 /* Poll init-done after we write FDIRCTRL register */
1405 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1406 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1407 IXGBE_FDIRCTRL_INIT_DONE)
1411 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1412 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1413 return IXGBE_ERR_FDIR_REINIT_FAILED;
1416 /* Clear FDIR statistics registers (read to clear) */
1417 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1418 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1419 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1420 IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1421 IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1423 return IXGBE_SUCCESS;
1427 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1428 * @hw: pointer to hardware structure
1429 * @fdirctrl: value to write to flow director control register
/* Programs the hash keys and FDIRCTRL, then polls INIT_DONE. Timeout is
 * logged but not returned to the caller (void function). */
1431 STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1435 DEBUGFUNC("ixgbe_fdir_enable_82599");
1437 /* Prime the keys for hashing */
1438 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1439 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1442 * Poll init-done after we write the register. Estimated times:
1443 * 10G: PBALLOC = 11b, timing is 60us
1444 * 1G: PBALLOC = 11b, timing is 600us
1445 * 100M: PBALLOC = 11b, timing is 6ms
1447 * Multiple these timings by 4 if under full Rx load
1449 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1450 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1451 * this might not finish in our poll time, but we can live with that
1454 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1455 IXGBE_WRITE_FLUSH(hw);
1456 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1457 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1458 IXGBE_FDIRCTRL_INIT_DONE)
1463 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1464 DEBUGOUT("Flow Director poll time exceeded!\n");
1468 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1469 * @hw: pointer to hardware structure
1470 * @fdirctrl: value to write to flow director control register, initially
1471 * contains just the value of the Rx packet buffer allocation
1473 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1475 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1478 * Continue setup of fdirctrl register bits:
1479 * Move the flexible bytes to use the ethertype - shift 6 words
1480 * Set the maximum length per hash bucket to 0xA filters
1481 * Send interrupt when 64 filters are left
/* 0x6/0xA/4 mirror the perfect-filter init below; keep in sync. */
1483 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1484 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1485 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1487 /* write hashes and fdirctrl register, poll for completion */
1488 ixgbe_fdir_enable_82599(hw, fdirctrl);
1490 return IXGBE_SUCCESS;
1494 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1495 * @hw: pointer to hardware structure
1496 * @fdirctrl: value to write to flow director control register, initially
1497 * contains just the value of the Rx packet buffer allocation
1498 * @cloud_mode: true - cloud mode, false - other mode
1500 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
1503 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1506 * Continue setup of fdirctrl register bits:
1507 * Turn perfect match filtering on
1508 * Report hash in RSS field of Rx wb descriptor
1509 * Initialize the drop queue
1510 * Move the flexible bytes to use the ethertype - shift 6 words
1511 * Set the maximum length per hash bucket to 0xA filters
1512 * Send interrupt when 64 (0x4 * 16) filters are left
1514 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1515 IXGBE_FDIRCTRL_REPORT_STATUS |
1516 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1517 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1518 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1519 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
/* NOTE(review): cloud_mode is accepted but not used in the visible body. */
1522 /* write hashes and fdirctrl register, poll for completion */
1523 ixgbe_fdir_enable_82599(hw, fdirctrl);
1525 return IXGBE_SUCCESS;
1529 * These defines allow us to quickly generate all of the necessary instructions
1530 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1531 * for values 0 through 15
/* COMMON key = bits set in BOTH the bucket and signature keys; those bits
 * feed a shared accumulator so each dword is processed once. */
1533 #define IXGBE_ATR_COMMON_HASH_KEY \
1534 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
/* One key-bit iteration: bit n of each key selects lo_hash_dword, bit n+16
 * selects hi_hash_dword; updates common_hash/bucket_hash/sig_hash in the
 * caller's scope. Expects lo/hi_hash_dword locals to exist. */
1535 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1538 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1539 common_hash ^= lo_hash_dword >> n; \
1540 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1541 bucket_hash ^= lo_hash_dword >> n; \
1542 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1543 sig_hash ^= lo_hash_dword << (16 - n); \
1544 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1545 common_hash ^= hi_hash_dword >> n; \
1546 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1547 bucket_hash ^= hi_hash_dword >> n; \
1548 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1549 sig_hash ^= hi_hash_dword << (16 - n); \
1553 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1554 * @stream: input bitstream to compute the hash on
1556 * This function is almost identical to the function above but contains
1557 * several optimizations such as unwinding all of the loops, letting the
1558 * compiler work out all of the conditional ifs since the keys are static
1559 * defines, and computing two keys at once since the hashed dword stream
1560 * will be the same for both keys.
1562 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1563 union ixgbe_atr_hash_dword common)
1565 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1566 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1568 /* record the flow_vm_vlan bits as they are a key part to the hash */
1569 flow_vm_vlan = IXGBE_NTOHL(input.dword);
1571 /* generate common hash dword */
1572 hi_hash_dword = IXGBE_NTOHL(common.dword);
1574 /* low dword is word swapped version of common */
1575 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1577 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1578 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1580 /* Process bits 0 and 16 */
1581 IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1584 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1585 * delay this because bit 0 of the stream should not be processed
1586 * so we do not add the VLAN until after bit 0 was processed
1588 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1590 /* Process remaining 30 bit of the key */
1591 IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1592 IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1593 IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1594 IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1595 IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1596 IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1597 IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1598 IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1599 IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1600 IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1601 IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1602 IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1603 IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1604 IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1605 IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1607 /* combine common_hash result with signature and bucket hashes */
1608 bucket_hash ^= common_hash;
1609 bucket_hash &= IXGBE_ATR_HASH_MASK;
/* sig hash occupies the upper 16 bits of the returned dword */
1611 sig_hash ^= common_hash << 16;
1612 sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1614 /* return completed signature hash */
1615 return sig_hash ^ bucket_hash;
1619 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1620 * @hw: pointer to hardware structure
1621 * @input: unique input dword
1622 * @common: compressed common input dword
1623 * @queue: queue index to direct traffic to
1625 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1626 union ixgbe_atr_hash_dword input,
1627 union ixgbe_atr_hash_dword common,
1634 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1637 * Get the flow_type in order to program FDIRCMD properly
1638 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1639 * fifth is FDIRCMD.TUNNEL_FILTER
1641 switch (input.formatted.flow_type) {
1642 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1643 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1644 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1645 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1646 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1647 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1650 DEBUGOUT(" Error on flow type input\n");
1651 return IXGBE_ERR_CONFIG;
1654 /* configure FDIRCMD register */
1655 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1656 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1657 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1658 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1661 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1662 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1664 fdirhashcmd = (u64)fdircmd << 32;
1665 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1666 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1668 err = ixgbe_fdir_check_cmd_complete(hw);
1670 DEBUGOUT("Flow Director command did not complete!\n");
1674 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1676 return IXGBE_SUCCESS;
/* Bucket-hash-only variant of the iteration macro above: bit n of the
 * bucket key selects lo_hash_dword, bit n+16 selects hi_hash_dword.
 * Updates bucket_hash in the caller's scope. */
1679 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1682 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1683 bucket_hash ^= lo_hash_dword >> n; \
1684 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1685 bucket_hash ^= hi_hash_dword >> n; \
1689 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1690 * @atr_input: input bitstream to compute the hash on
1691 * @input_mask: mask for the input bitstream
1693 * This function serves two main purposes. First it applies the input_mask
1694 * to the atr_input resulting in a cleaned up atr_input data stream.
1695 * Secondly it computes the hash and stores it in the bkt_hash field at
1696 * the end of the input byte stream. This way it will be available for
1697 * future use without needing to recompute the hash.
1699 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1700 union ixgbe_atr_input *input_mask)
1703 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1704 u32 bucket_hash = 0;
1708 /* Apply masks to input data (mutates *input in place) */
1709 for (i = 0; i < 14; i++)
1710 input->dword_stream[i] &= input_mask->dword_stream[i];
1712 /* record the flow_vm_vlan bits as they are a key part to the hash */
1713 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1715 /* generate common hash dword */
1716 for (i = 1; i <= 13; i++)
1717 hi_dword ^= input->dword_stream[i];
1718 hi_hash_dword = IXGBE_NTOHL(hi_dword);
1720 /* low dword is word swapped version of common */
1721 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1723 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1724 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1726 /* Process bits 0 and 16 */
1727 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1730 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1731 * delay this because bit 0 of the stream should not be processed
1732 * so we do not add the VLAN until after bit 0 was processed
1734 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1736 /* Process remaining 30 bit of the key */
1737 for (i = 1; i <= 15; i++)
1738 IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
1741 * Limit hash to 13 bits since max bucket count is 8K.
1742 * Store result at the end of the input stream.
1744 input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1748 * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks
1749 * @input_mask: mask to be bit swapped
1751 * The source and destination port masks for flow director are bit swapped
1752 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to
1753 * generate a correctly swapped value we need to bit swap the mask and that
1754 * is what is accomplished by this function.
1756 STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
/* Pack dst_port into the high half, src_port into the low half, then
 * reverse the bits of the whole 32-bit word via the classic
 * swap-pairs / swap-nibbles / swap-bytes ladder. */
1758 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1759 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1760 mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1761 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1762 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1763 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1764 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1768 * These two macros are meant to address the fact that we have registers
1769 * that are either all or in part big-endian. As a result on big-endian
1770 * systems we will end up byte swapping the value to little-endian before
1771 * it is byte swapped again and written to the hardware in the original
1772 * big-endian format.
/* Unconditional 32-bit byte reverse. */
1774 #define IXGBE_STORE_AS_BE32(_value) \
1775 (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1776 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
/* Register write that presents 'value' to HW in big-endian byte order. */
1778 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1779 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
/* 16-bit analogue: swap bytes then apply host<->network conversion. */
1781 #define IXGBE_STORE_AS_BE16(_value) \
1782 IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
/* Program the global FDIR input-mask registers (FDIRM, FDIRTCPM, FDIRUDPM,
 * FDIRSIP4M, FDIRDIP4M) from a caller-supplied mask. Returns
 * IXGBE_ERR_CONFIG for partial masks the hardware cannot express.
 * NOTE(review): several switch 'case' labels are not in view in this
 * sampled chunk; the visible arms are the mask-set and error paths. */
1784 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1785 union ixgbe_atr_input *input_mask, bool cloud_mode)
1787 /* mask IPv6 since it is currently not supported */
1788 u32 fdirm = IXGBE_FDIRM_DIPv6;
1790 DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1793 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1794 * are zero, then assume a full mask for that field. Also assume that
1795 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1796 * cannot be masked out in this implementation.
1798 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1802 /* verify bucket hash is cleared on hash generation */
1803 if (input_mask->formatted.bkt_hash)
1804 DEBUGOUT(" bucket hash should always be 0 in mask\n");
1806 /* Program FDIRM and verify partial masks */
1807 switch (input_mask->formatted.vm_pool & 0x7F) {
1809 fdirm |= IXGBE_FDIRM_POOL;
1813 DEBUGOUT(" Error on vm pool mask\n");
1814 return IXGBE_ERR_CONFIG;
1817 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1819 fdirm |= IXGBE_FDIRM_L4P;
/* L4 type masked out: port masks must also be zero, else reject. */
1820 if (input_mask->formatted.dst_port ||
1821 input_mask->formatted.src_port) {
1822 DEBUGOUT(" Error on src/dst port mask\n");
1823 return IXGBE_ERR_CONFIG;
1825 case IXGBE_ATR_L4TYPE_MASK:
1828 DEBUGOUT(" Error on flow type mask\n");
1829 return IXGBE_ERR_CONFIG;
1832 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1834 /* mask VLAN ID, fall through to mask VLAN priority */
1835 fdirm |= IXGBE_FDIRM_VLANID;
1837 /* mask VLAN priority */
1838 fdirm |= IXGBE_FDIRM_VLANP;
1841 /* mask VLAN ID only, fall through */
1842 fdirm |= IXGBE_FDIRM_VLANID;
1844 /* no VLAN fields masked */
1847 DEBUGOUT(" Error on VLAN mask\n");
1848 return IXGBE_ERR_CONFIG;
1851 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1853 /* Mask Flex Bytes, fall through */
1854 fdirm |= IXGBE_FDIRM_FLEX;
1858 DEBUGOUT(" Error on flexible byte mask\n");
1859 return IXGBE_ERR_CONFIG;
1863 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1864 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1866 /* store the TCP/UDP port masks, bit reversed from port layout */
1867 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1869 /* write both the same so that UDP and TCP use the same mask */
1870 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1871 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1873 /* store source and destination IP masks (big-endian) */
1874 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1875 ~input_mask->formatted.src_ip[0]);
1876 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1877 ~input_mask->formatted.dst_ip[0]);
1879 return IXGBE_SUCCESS;
/* Program one perfect-match filter into hardware: all match fields first,
 * then FDIRHASH, a flush, and finally FDIRCMD (the command write is what
 * commits the filter, so ordering matters).
 * @soft_id: software index stored alongside the bucket hash
 * @queue: Rx queue to steer matches to */
1882 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1883 union ixgbe_atr_input *input,
1884 u16 soft_id, u8 queue, bool cloud_mode)
1886 u32 fdirport, fdirvlan, fdirhash, fdircmd;
1888 DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1890 /* currently IPv6 is not supported, must be programmed with 0 */
1891 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1892 input->formatted.src_ip[0]);
1893 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1894 input->formatted.src_ip[1]);
1895 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1896 input->formatted.src_ip[2]);
1898 /* record the source address (big-endian) */
1899 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1901 /* record the first 32 bits of the destination address (big-endian) */
1902 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1904 /* record source and destination port (little-endian)*/
1905 fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1906 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1907 fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1908 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1910 /* record VLAN (little-endian) and flex_bytes(big-endian) */
1911 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1912 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1913 fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1914 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1917 /* configure FDIRHASH register */
1918 fdirhash = input->formatted.bkt_hash;
1919 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1920 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1923 * flush all previous writes to make certain registers are
1924 * programmed prior to issuing the command
1926 IXGBE_WRITE_FLUSH(hw);
1928 /* configure FDIRCMD register */
1929 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1930 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1931 if (queue == IXGBE_FDIR_DROP_QUEUE)
1932 fdircmd |= IXGBE_FDIRCMD_DROP;
1933 if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK)
1934 fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1935 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1936 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1937 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1939 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1941 return IXGBE_SUCCESS;
/* Remove a perfect filter: program FDIRHASH, issue a query command to see
 * whether the filter exists, and only then issue the remove command. */
1944 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1945 union ixgbe_atr_input *input,
1952 /* configure FDIRHASH register */
1953 fdirhash = input->formatted.bkt_hash;
1954 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1955 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1957 /* flush hash to HW */
1958 IXGBE_WRITE_FLUSH(hw);
1960 /* Query if filter is present */
1961 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1963 err = ixgbe_fdir_check_cmd_complete(hw);
1965 DEBUGOUT("Flow Director command did not complete!\n");
1969 /* if filter exists in hardware then remove it */
/* NOTE(review): 'fdircmd' is presumably read back from FDIRCMD between
 * the poll and this check — that read is not in view here; confirm. */
1970 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1971 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1972 IXGBE_WRITE_FLUSH(hw);
1973 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1974 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1977 return IXGBE_SUCCESS;
1981 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1982 * @hw: pointer to hardware structure
1983 * @input: input bitstream
1984 * @input_mask: mask for the input bitstream
1985 * @soft_id: software index for the filters
1986 * @queue: queue index to direct traffic to
1988 * Note that the caller to this function must lock before calling, since the
1989 * hardware writes must be protected from one another.
1991 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1992 union ixgbe_atr_input *input,
1993 union ixgbe_atr_input *input_mask,
1994 u16 soft_id, u8 queue, bool cloud_mode)
1996 s32 err = IXGBE_ERR_CONFIG;
1998 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
2001 * Check flow_type formatting, and bail out before we touch the hardware
2002 * if there's a configuration issue
2004 switch (input->formatted.flow_type) {
2005 case IXGBE_ATR_FLOW_TYPE_IPV4:
2006 case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4:
/* Note: this mutates the caller's input_mask flow_type field. */
2007 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
2008 if (input->formatted.dst_port || input->formatted.src_port) {
2009 DEBUGOUT(" Error on src/dst port\n");
2010 return IXGBE_ERR_CONFIG;
2013 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2014 case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4:
2015 if (input->formatted.dst_port || input->formatted.src_port) {
2016 DEBUGOUT(" Error on src/dst port\n");
2017 return IXGBE_ERR_CONFIG;
2019 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2020 case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
2021 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2022 case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4:
2023 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2024 IXGBE_ATR_L4TYPE_MASK;
2027 DEBUGOUT(" Error on flow type input\n");
2031 /* program input mask into the HW */
2032 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode);
2036 /* apply mask and compute/store hash */
2037 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
2039 /* program filters to filter memory */
2040 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2041 soft_id, queue, cloud_mode);
2045 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2046 * @hw: pointer to hardware structure
2047 * @reg: analog register to read
2050 * Performs read operation to Omer analog register specified.
2052 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2056 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
/* Kick off the read via CORECTL, then read back; the result is in the
 * low byte of CORECTL. */
2058 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2060 IXGBE_WRITE_FLUSH(hw);
2062 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2063 *val = (u8)core_ctl;
2065 return IXGBE_SUCCESS;
2069 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2070 * @hw: pointer to hardware structure
2071 * @reg: atlas register to write
2072 * @val: value to write
2074 * Performs write operation to Omer analog register specified.
2076 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2080 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
/* CORECTL layout per this code: register address in bits 15:8, value in
 * the low byte. */
2082 core_ctl = (reg << 8) | val;
2083 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2084 IXGBE_WRITE_FLUSH(hw);
2087 return IXGBE_SUCCESS;
2091 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2092 * @hw: pointer to hardware structure
2094 * Starts the hardware using the generic start_hw function
2095 * and the generation start_hw function.
2096 * Then performs revision-specific operations, if any.
2098 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2100 s32 ret_val = IXGBE_SUCCESS;
2102 DEBUGFUNC("ixgbe_start_hw_82599");
2104 ret_val = ixgbe_start_hw_generic(hw);
2105 if (ret_val != IXGBE_SUCCESS)
2108 ret_val = ixgbe_start_hw_gen2(hw);
2109 if (ret_val != IXGBE_SUCCESS)
2112 /* We need to run link autotry after the driver loads */
2113 hw->mac.autotry_restart = true;
/* FW version check only runs if everything above succeeded. */
2115 if (ret_val == IXGBE_SUCCESS)
2116 ret_val = ixgbe_verify_fw_version_82599(hw);
2122 * ixgbe_identify_phy_82599 - Get physical layer module
2123 * @hw: pointer to hardware structure
2125 * Determines the physical layer module found on the current adapter.
2126 * If PHY already detected, maintains current PHY type in hw struct,
2127 * otherwise executes the PHY detection routine.
2129 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2133 DEBUGFUNC("ixgbe_identify_phy_82599");
2135 /* Detect PHY if not unknown - returns success if already detected. */
2136 status = ixgbe_identify_phy_generic(hw);
2137 if (status != IXGBE_SUCCESS) {
2138 /* 82599 10GBASE-T requires an external PHY */
2139 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
/* Not copper: fall back to SFP/QSFP module identification. */
2142 status = ixgbe_identify_module_generic(hw);
2145 /* Set PHY type none if no PHY detected */
2146 if (hw->phy.type == ixgbe_phy_unknown) {
2147 hw->phy.type = ixgbe_phy_none;
2148 return IXGBE_SUCCESS;
2151 /* Return error if SFP module has been detected but is not supported */
2152 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2153 return IXGBE_ERR_SFP_NOT_SUPPORTED;
2159 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2160 * @hw: pointer to hardware structure
2162 * Determines physical layer capabilities of the current configuration.
2164 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2166 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2167 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2168 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
/* Decode the PMA/PMD sub-fields of AUTOC/AUTOC2 once up front. */
2169 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2170 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2171 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2172 u16 ext_ability = 0;
/* NOTE(review): trace string says "ixgbe_get_support..." while the function
 * is "ixgbe_get_supported..." — debug-string typo only, harmless at runtime;
 * left unchanged here because it is a runtime string. */
2174 DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
2176 hw->phy.ops.identify(hw);
2178 switch (hw->phy.type) {
/* Copper PHY present: query its PMA/PMD extended-ability MDIO register to
 * learn which BASE-T speeds the external PHY supports. */
2180 case ixgbe_phy_cu_unknown:
2181 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2182 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2183 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2184 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2185 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2186 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2187 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2188 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
/* NOTE(review): original lines ~2189-2193 (goto out / remaining phy cases /
 * switch close) are elided from this numbered listing. */
/* No recognized PHY: infer the layer from the AUTOC link-mode-select field. */
2194 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2195 case IXGBE_AUTOC_LMS_1G_AN:
2196 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2197 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2198 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2199 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2202 /* SFI mode so read SFP module */
/* NOTE(review): the 1G-SFI fall-through and case terminator (original lines
 * ~2200-2204) are elided from this listing. */
2205 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2206 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2207 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2208 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2209 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2210 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2211 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2214 case IXGBE_AUTOC_LMS_10G_SERIAL:
2215 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2216 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2218 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
/* NOTE(review): the SFI branch body (original lines ~2219-2220, presumably a
 * fall-through to the SFP-module check below) is elided from this listing. */
/* Backplane auto-neg: KX / KX4 / KR support bits are independent flags, so
 * the capabilities are OR-ed together rather than assigned. */
2221 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2222 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2223 if (autoc & IXGBE_AUTOC_KX_SUPP)
2224 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2225 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2226 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2227 if (autoc & IXGBE_AUTOC_KR_SUPP)
2228 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
/* NOTE(review): original lines ~2229-2236 (goto out, default case, label)
 * are elided from this listing. */
2237 /* SFP check must be done last since DA modules are sometimes used to
2238 * test KR mode - we need to id KR mode correctly before SFP module.
2239 * Call identify_sfp because the pluggable module may have changed */
2240 physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw);
2242 return physical_layer;
2246 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2247 * @hw: pointer to hardware structure
2248 * @regval: register value to write to RXCTRL
2250 * Enables the Rx DMA unit for 82599
2252 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2255 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2258 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2259 * If traffic is incoming before we enable the Rx unit, it could hang
2260 * the Rx DMA unit. Therefore, make sure the security engine is
2261 * completely disabled prior to enabling the Rx unit.
2264 hw->mac.ops.disable_sec_rx_path(hw);
/* Honor the caller's RXEN intent while the security path is quiesced. */
2266 if (regval & IXGBE_RXCTRL_RXEN)
2267 ixgbe_enable_rx(hw);
/* NOTE(review): the "else" introducer (original line ~2268) is elided from
 * this numbered listing; the disable call below is the alternate branch. */
2269 ixgbe_disable_rx(hw);
/* Re-arm the security engine only after RXCTRL has been programmed. */
2271 hw->mac.ops.enable_sec_rx_path(hw);
2273 return IXGBE_SUCCESS;
2277 * ixgbe_verify_fw_version_82599 - verify FW version for 82599
2278 * @hw: pointer to hardware structure
2280 * Verifies that the installed firmware version is 0.6 or higher
2281 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2283 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2284 * if the FW version is not supported.
2286 STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2288 s32 status = IXGBE_ERR_EEPROM_VERSION;
2289 u16 fw_offset, fw_ptp_cfg_offset;
/* NOTE(review): the fw_version declaration (original line ~2290) is elided
 * from this numbered listing. */
2292 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2294 /* firmware check is only necessary for SFI devices */
2295 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2296 status = IXGBE_SUCCESS;
2297 goto fw_version_out;
2300 /* get the offset to the Firmware Module block */
2301 if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
2302 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2303 "eeprom read at offset %d failed", IXGBE_FW_PTR);
2304 return IXGBE_ERR_EEPROM_VERSION;
/* 0 / 0xFFFF indicate an unprogrammed (blank or erased) EEPROM pointer. */
2307 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2308 goto fw_version_out;
2310 /* get the offset to the Pass Through Patch Configuration block */
2311 if (hw->eeprom.ops.read(hw, (fw_offset +
2312 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2313 &fw_ptp_cfg_offset)) {
2314 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2315 "eeprom read at offset %d failed",
2317 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
2318 return IXGBE_ERR_EEPROM_VERSION;
2321 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2322 goto fw_version_out;
2324 /* get the firmware version */
2325 if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2326 IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
2327 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
2328 "eeprom read at offset %d failed",
2329 fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
2330 return IXGBE_ERR_EEPROM_VERSION;
/* Version value > 0x5 means FW 0.6 or later, per the header comment. */
2333 if (fw_version > 0x5)
2334 status = IXGBE_SUCCESS;
2341 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2342 * @hw: pointer to hardware structure
2344 * Returns true if the LESM FW module is present and enabled. Otherwise
2345 * returns false. Smart Speed must be disabled if LESM FW module is enabled.
2347 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2349 bool lesm_enabled = false;
2350 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
/* NOTE(review): the status declaration (original line ~2351) is elided from
 * this numbered listing. */
2353 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2355 /* get the offset to the Firmware Module block */
2356 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
/* A failed read or an unprogrammed pointer (0 / 0xFFFF) means no LESM. */
2358 if ((status != IXGBE_SUCCESS) ||
2359 (fw_offset == 0) || (fw_offset == 0xFFFF))
/* NOTE(review): early-exit body (original line ~2360, presumably
 * "goto out;") is elided from this listing. */
2362 /* get the offset to the LESM Parameters block */
2363 status = hw->eeprom.ops.read(hw, (fw_offset +
2364 IXGBE_FW_LESM_PARAMETERS_PTR),
2365 &fw_lesm_param_offset);
2367 if ((status != IXGBE_SUCCESS) ||
2368 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
/* NOTE(review): early-exit body elided here as well (original line ~2369). */
2371 /* get the LESM state word */
2372 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2373 IXGBE_FW_LESM_STATE_1),
/* NOTE(review): the final argument line (original line ~2374, presumably
 * "&fw_lesm_state);") is elided from this listing. */
2376 if ((status == IXGBE_SUCCESS) &&
2377 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2378 lesm_enabled = true;
2381 return lesm_enabled;
2385 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2386 * fastest available method
2388 * @hw: pointer to hardware structure
2389 * @offset: offset of word in EEPROM to read
2390 * @words: number of words
2391 * @data: word(s) read from the EEPROM
2393 * Retrieves 16 bit word(s) read from EEPROM
2395 STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2396 u16 words, u16 *data)
2398 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2399 s32 ret_val = IXGBE_ERR_CONFIG;
2401 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2404 * If EEPROM is detected and can be addressed using 14 bits,
2405 * use EERD otherwise use bit bang
/* The bound checks the LAST word of the request (offset + words - 1), so the
 * whole buffer must be EERD-addressable to take the fast path. */
2407 if ((eeprom->type == ixgbe_eeprom_spi) &&
2408 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2409 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
/* NOTE(review): the argument continuation ("data);"), the "else" introducer,
 * and the bit-bang call's trailing arguments / return (original lines
 * ~2410-2416) are elided from this numbered listing. */
2412 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2420 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2421 * fastest available method
2423 * @hw: pointer to hardware structure
2424 * @offset: offset of word in the EEPROM to read
2425 * @data: word read from the EEPROM
2427 * Reads a 16 bit word from the EEPROM
2429 STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2430 u16 offset, u16 *data)
2432 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2433 s32 ret_val = IXGBE_ERR_CONFIG;
2435 DEBUGFUNC("ixgbe_read_eeprom_82599");
2438 * If EEPROM is detected and can be addressed using 14 bits,
2439 * use EERD otherwise use bit bang
/* Single-word variant of ixgbe_read_eeprom_buffer_82599: EERD fast path when
 * the offset fits in 14 bits, otherwise fall back to bit-banging. */
2441 if ((eeprom->type == ixgbe_eeprom_spi) &&
2442 (offset <= IXGBE_EERD_MAX_ADDR))
2443 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
/* NOTE(review): the "else" introducer (original line ~2444) and the trailing
 * return (original line ~2446+) are elided from this numbered listing. */
2445 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2451 * ixgbe_reset_pipeline_82599 - perform pipeline reset
2453 * @hw: pointer to hardware structure
2455 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2456 * full pipeline reset. This function assumes the SW/FW lock is held.
2458 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
/* NOTE(review): declarations of ret_val and anlp1_reg (original lines
 * ~2459-2461) are elided from this numbered listing. */
2462 u32 i, autoc_reg, autoc2_reg;
2464 /* Enable link if disabled in NVM */
2465 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2466 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2467 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2468 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2469 IXGBE_WRITE_FLUSH(hw);
2472 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2473 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2474 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
/* XOR-ing 0x4 into the LMS field flips bit 2 of link-mode-select; combined
 * with Restart_AN this forces a full pipeline reset. */
2475 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2476 autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2477 /* Wait for AN to leave state 0 */
2478 for (i = 0; i < 10; i++) {
/* NOTE(review): the per-iteration delay (original line ~2479, presumably a
 * msec-style sleep) is elided from this listing. */
2480 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2481 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
/* NOTE(review): the loop "break;" (original line ~2482) is elided. */
/* AN never left state 0 within ~10 polls: report reset failure. */
2485 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2486 DEBUGOUT("auto negotiation not completed\n");
2487 ret_val = IXGBE_ERR_RESET_FAILED;
2488 goto reset_pipeline_out;
2491 ret_val = IXGBE_SUCCESS;
/* NOTE(review): the reset_pipeline_out: label (original line ~2493) is
 * elided from this listing; both paths restore AUTOC below. */
2494 /* Write AUTOC register with original LMS field and Restart_AN */
2495 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2496 IXGBE_WRITE_FLUSH(hw);
2503 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
2504 * @hw: pointer to hardware structure
2505 * @byte_offset: byte offset to read
/* NOTE(review): kernel-doc lines for @dev_addr and @data (original lines
 * ~2506-2507) are elided from this numbered listing. */
2508 * Performs byte read operation to SFP module's EEPROM over I2C interface at
2509 * a specified device address.
2511 STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2512 u8 dev_addr, u8 *data)
/* NOTE(review): local declarations (esdp, status, timeout counter; original
 * lines ~2513-2517) are elided from this listing. */
2518 DEBUGFUNC("ixgbe_read_i2c_byte_82599");
2520 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2521 /* Acquire I2C bus ownership. */
/* Assert SDP0 to claim the shared bus; presumably SDP1 reflects the other
 * bus master still holding it — confirm against the board design. */
2522 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2523 esdp |= IXGBE_ESDP_SDP0;
2524 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2525 IXGBE_WRITE_FLUSH(hw);
/* NOTE(review): the timeout loop opening and its countdown (original lines
 * ~2526-2536) are elided from this listing; the SDP1 poll below sits
 * inside that loop. */
2528 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2529 if (esdp & IXGBE_ESDP_SDP1)
2537 DEBUGOUT("Driver can't access resource,"
2538 " acquiring I2C bus timeout.\n");
2539 status = IXGBE_ERR_I2C;
2540 goto release_i2c_access;
/* Bus (if shared) is now ours: do the actual generic I2C byte read. */
2544 status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
/* NOTE(review): the release_i2c_access: label (original line ~2546) is
 * elided from this listing. */
2548 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2549 /* Release I2C bus ownership. */
2550 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2551 esdp &= ~IXGBE_ESDP_SDP0;
2552 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2553 IXGBE_WRITE_FLUSH(hw);
2560 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
2561 * @hw: pointer to hardware structure
2562 * @byte_offset: byte offset to write
2563 * @data: value to write
2565 * Performs byte write operation to SFP module's EEPROM over I2C interface at
2566 * a specified device address.
2568 STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2569 u8 dev_addr, u8 data)
2575 DEBUGFUNC("ixgbe_write_i2c_byte_82599");
2577 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2578 /* Acquire I2C bus ownership. */
2579 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2580 esdp |= IXGBE_ESDP_SDP0;
2581 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2582 IXGBE_WRITE_FLUSH(hw);
2585 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2586 if (esdp & IXGBE_ESDP_SDP1)
2594 DEBUGOUT("Driver can't access resource,"
2595 " acquiring I2C bus timeout.\n");
2596 status = IXGBE_ERR_I2C;
2597 goto release_i2c_access;
2601 status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2605 if (hw->phy.qsfp_shared_i2c_bus == TRUE) {
2606 /* Release I2C bus ownership. */
2607 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2608 esdp &= ~IXGBE_ESDP_SDP0;
2609 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2610 IXGBE_WRITE_FLUSH(hw);