1 /*******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 /* 82562G 10/100 Network Connection
35 * 82562G-2 10/100 Network Connection
36 * 82562GT 10/100 Network Connection
37 * 82562GT-2 10/100 Network Connection
38 * 82562V 10/100 Network Connection
39 * 82562V-2 10/100 Network Connection
40 * 82566DC-2 Gigabit Network Connection
41 * 82566DC Gigabit Network Connection
42 * 82566DM-2 Gigabit Network Connection
43 * 82566DM Gigabit Network Connection
44 * 82566MC Gigabit Network Connection
45 * 82566MM Gigabit Network Connection
46 * 82567LM Gigabit Network Connection
47 * 82567LF Gigabit Network Connection
48 * 82567V Gigabit Network Connection
49 * 82567LM-2 Gigabit Network Connection
50 * 82567LF-2 Gigabit Network Connection
51 * 82567V-2 Gigabit Network Connection
52 * 82567LF-3 Gigabit Network Connection
53 * 82567LM-3 Gigabit Network Connection
54 * 82567LM-4 Gigabit Network Connection
55 * 82577LM Gigabit Network Connection
56 * 82577LC Gigabit Network Connection
57 * 82578DM Gigabit Network Connection
58 * 82578DC Gigabit Network Connection
59 * 82579LM Gigabit Network Connection
60 * 82579V Gigabit Network Connection
61 * Ethernet Connection I217-LM
62 * Ethernet Connection I217-V
63 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 */
67 #include "e1000_api.h"
69 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
70 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
71 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
72 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
73 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
74 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
75 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
76 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
77 STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
78 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
79 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
80 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
83 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
84 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
85 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
86 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
87 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
89 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
91 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
92 u16 words, u16 *data);
93 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
94 u16 words, u16 *data);
95 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
96 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
97 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
99 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
100 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
101 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
102 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
103 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
104 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
105 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
106 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
107 u16 *speed, u16 *duplex);
108 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
109 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
110 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
111 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
112 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
113 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
114 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw);
115 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw);
116 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
117 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
118 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
119 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
120 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
121 u32 offset, u8 *data);
122 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
124 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
125 u32 offset, u16 *data);
126 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
127 u32 offset, u8 byte);
128 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
129 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
130 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
131 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
132 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
133 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
135 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
136 /* Offset 04h HSFSTS */
137 union ich8_hws_flash_status {
139 u16 flcdone:1; /* bit 0 Flash Cycle Done */
140 u16 flcerr:1; /* bit 1 Flash Cycle Error */
141 u16 dael:1; /* bit 2 Direct Access error Log */
142 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
143 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
144 u16 reserved1:2; /* bit 13:6 Reserved */
145 u16 reserved2:6; /* bit 13:6 Reserved */
146 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
147 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
152 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
153 /* Offset 06h FLCTL */
154 union ich8_hws_flash_ctrl {
155 struct ich8_hsflctl {
156 u16 flcgo:1; /* 0 Flash Cycle Go */
157 u16 flcycle:2; /* 2:1 Flash Cycle */
158 u16 reserved:5; /* 7:3 Reserved */
159 u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
160 u16 flockdn:6; /* 15:10 Reserved */
165 /* ICH Flash Region Access Permissions */
166 union ich8_hws_flash_regacc {
168 u32 grra:8; /* 0:7 GbE region Read Access */
169 u32 grwa:8; /* 8:15 GbE region Write Access */
170 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
171 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
177 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
178 * @hw: pointer to the HW structure
180 * Test access to the PHY registers by reading the PHY ID registers. If
181 * the PHY ID is already known (e.g. resume path) compare it with known ID,
182 * otherwise assume the read PHY ID is correct if it is valid.
184 * Assumes the sw/fw/hw semaphore is already acquired.
186 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
194 for (retry_count = 0; retry_count < 2; retry_count++) {
195 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
196 if (ret_val || (phy_reg == 0xFFFF))
198 phy_id = (u32)(phy_reg << 16);
200 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
201 if (ret_val || (phy_reg == 0xFFFF)) {
205 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
210 if (hw->phy.id == phy_id)
214 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
218 /* In case the PHY needs to be in mdio slow mode,
219 * set slow mode and try to get the PHY id again.
221 if (hw->mac.type < e1000_pch_lpt) {
222 hw->phy.ops.release(hw);
223 ret_val = e1000_set_mdio_slow_mode_hv(hw);
225 ret_val = e1000_get_phy_id(hw);
226 hw->phy.ops.acquire(hw);
232 if (hw->mac.type == e1000_pch_lpt) {
233 /* Unforce SMBus mode in PHY */
234 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
235 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
236 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
238 /* Unforce SMBus mode in MAC */
239 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
240 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
241 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
248 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
249 * @hw: pointer to the HW structure
251 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
252 * used to reset the PHY to a quiescent state when necessary.
254 void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
258 DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
260 /* Set Phy Config Counter to 50msec */
261 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
262 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
263 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
264 E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
266 /* Toggle LANPHYPC Value bit */
267 mac_reg = E1000_READ_REG(hw, E1000_CTRL);
268 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
269 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
270 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
271 E1000_WRITE_FLUSH(hw);
273 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
274 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
275 E1000_WRITE_FLUSH(hw);
277 if (hw->mac.type < e1000_pch_lpt) {
284 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
285 E1000_CTRL_EXT_LPCD) && count--);
292 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
293 * @hw: pointer to the HW structure
295 * Workarounds/flow necessary for PHY initialization during driver load
298 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
300 u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
303 DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
305 /* Gate automatic PHY configuration by hardware on managed and
306 * non-managed 82579 and newer adapters.
308 e1000_gate_hw_phy_config_ich8lan(hw, true);
310 #if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
311 /* It is not possible to be certain of the current state of ULP
312 * so forcibly disable it.
314 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
316 #endif /* NAHUM6LP_HW && ULP_SUPPORT */
317 ret_val = hw->phy.ops.acquire(hw);
319 DEBUGOUT("Failed to initialize PHY flow\n");
323 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
324 * inaccessible and resetting the PHY is not blocked, toggle the
325 * LANPHYPC Value bit to force the interconnect to PCIe mode.
327 switch (hw->mac.type) {
329 if (e1000_phy_is_accessible_pchlan(hw))
332 /* Before toggling LANPHYPC, see if PHY is accessible by
333 * forcing MAC to SMBus mode first.
335 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
336 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
337 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
339 /* Wait 50 milliseconds for MAC to finish any retries
340 * that it might be trying to perform from previous
341 * attempts to acknowledge any phy read requests.
347 if (e1000_phy_is_accessible_pchlan(hw))
352 if ((hw->mac.type == e1000_pchlan) &&
353 (fwsm & E1000_ICH_FWSM_FW_VALID))
356 if (hw->phy.ops.check_reset_block(hw)) {
357 DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
358 ret_val = -E1000_ERR_PHY;
362 /* Toggle LANPHYPC Value bit */
363 e1000_toggle_lanphypc_pch_lpt(hw);
364 if (hw->mac.type >= e1000_pch_lpt) {
365 if (e1000_phy_is_accessible_pchlan(hw))
368 /* Toggling LANPHYPC brings the PHY out of SMBus mode
369 * so ensure that the MAC is also out of SMBus mode
371 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
372 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
373 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
375 if (e1000_phy_is_accessible_pchlan(hw))
378 ret_val = -E1000_ERR_PHY;
385 hw->phy.ops.release(hw);
388 /* Check to see if able to reset PHY. Print error if not */
389 if (hw->phy.ops.check_reset_block(hw)) {
390 ERROR_REPORT("Reset blocked by ME\n");
394 /* Reset the PHY before any access to it. Doing so, ensures
395 * that the PHY is in a known good state before we read/write
396 * PHY registers. The generic reset is sufficient here,
397 * because we haven't determined the PHY type yet.
399 ret_val = e1000_phy_hw_reset_generic(hw);
403 /* On a successful reset, possibly need to wait for the PHY
404 * to quiesce to an accessible state before returning control
405 * to the calling function. If the PHY does not quiesce, then
406 * return E1000E_BLK_PHY_RESET, as this is the condition that
409 ret_val = hw->phy.ops.check_reset_block(hw);
411 ERROR_REPORT("ME blocked access to PHY after reset\n");
415 /* Ungate automatic PHY configuration on non-managed 82579 */
416 if ((hw->mac.type == e1000_pch2lan) &&
417 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
419 e1000_gate_hw_phy_config_ich8lan(hw, false);
426 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
427 * @hw: pointer to the HW structure
429 * Initialize family-specific PHY parameters and function pointers.
431 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
433 struct e1000_phy_info *phy = &hw->phy;
436 DEBUGFUNC("e1000_init_phy_params_pchlan");
439 phy->reset_delay_us = 100;
441 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
442 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
443 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
444 phy->ops.set_page = e1000_set_page_igp;
445 phy->ops.read_reg = e1000_read_phy_reg_hv;
446 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
447 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
448 phy->ops.release = e1000_release_swflag_ich8lan;
449 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
450 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
451 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
452 phy->ops.write_reg = e1000_write_phy_reg_hv;
453 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
454 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
455 phy->ops.power_up = e1000_power_up_phy_copper;
456 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
457 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
459 phy->id = e1000_phy_unknown;
461 ret_val = e1000_init_phy_workarounds_pchlan(hw);
465 if (phy->id == e1000_phy_unknown)
466 switch (hw->mac.type) {
468 ret_val = e1000_get_phy_id(hw);
471 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
476 /* In case the PHY needs to be in mdio slow mode,
477 * set slow mode and try to get the PHY id again.
479 ret_val = e1000_set_mdio_slow_mode_hv(hw);
482 ret_val = e1000_get_phy_id(hw);
487 phy->type = e1000_get_phy_type_from_id(phy->id);
490 case e1000_phy_82577:
491 case e1000_phy_82579:
493 phy->ops.check_polarity = e1000_check_polarity_82577;
494 phy->ops.force_speed_duplex =
495 e1000_phy_force_speed_duplex_82577;
496 phy->ops.get_cable_length = e1000_get_cable_length_82577;
497 phy->ops.get_info = e1000_get_phy_info_82577;
498 phy->ops.commit = e1000_phy_sw_reset_generic;
500 case e1000_phy_82578:
501 phy->ops.check_polarity = e1000_check_polarity_m88;
502 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
503 phy->ops.get_cable_length = e1000_get_cable_length_m88;
504 phy->ops.get_info = e1000_get_phy_info_m88;
507 ret_val = -E1000_ERR_PHY;
515 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
516 * @hw: pointer to the HW structure
518 * Initialize family-specific PHY parameters and function pointers.
520 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
522 struct e1000_phy_info *phy = &hw->phy;
526 DEBUGFUNC("e1000_init_phy_params_ich8lan");
529 phy->reset_delay_us = 100;
531 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
532 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
533 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
534 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
535 phy->ops.read_reg = e1000_read_phy_reg_igp;
536 phy->ops.release = e1000_release_swflag_ich8lan;
537 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
538 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
539 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
540 phy->ops.write_reg = e1000_write_phy_reg_igp;
541 phy->ops.power_up = e1000_power_up_phy_copper;
542 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
544 /* We may need to do this twice - once for IGP and if that fails,
545 * we'll set BM func pointers and try again
547 ret_val = e1000_determine_phy_address(hw);
549 phy->ops.write_reg = e1000_write_phy_reg_bm;
550 phy->ops.read_reg = e1000_read_phy_reg_bm;
551 ret_val = e1000_determine_phy_address(hw);
553 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
559 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
562 ret_val = e1000_get_phy_id(hw);
569 case IGP03E1000_E_PHY_ID:
570 phy->type = e1000_phy_igp_3;
571 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
572 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
573 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
574 phy->ops.get_info = e1000_get_phy_info_igp;
575 phy->ops.check_polarity = e1000_check_polarity_igp;
576 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
579 case IFE_PLUS_E_PHY_ID:
581 phy->type = e1000_phy_ife;
582 phy->autoneg_mask = E1000_ALL_NOT_GIG;
583 phy->ops.get_info = e1000_get_phy_info_ife;
584 phy->ops.check_polarity = e1000_check_polarity_ife;
585 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
587 case BME1000_E_PHY_ID:
588 phy->type = e1000_phy_bm;
589 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
590 phy->ops.read_reg = e1000_read_phy_reg_bm;
591 phy->ops.write_reg = e1000_write_phy_reg_bm;
592 phy->ops.commit = e1000_phy_sw_reset_generic;
593 phy->ops.get_info = e1000_get_phy_info_m88;
594 phy->ops.check_polarity = e1000_check_polarity_m88;
595 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
598 return -E1000_ERR_PHY;
602 return E1000_SUCCESS;
606 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
607 * @hw: pointer to the HW structure
609 * Initialize family-specific NVM parameters and function
612 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
614 struct e1000_nvm_info *nvm = &hw->nvm;
615 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
616 u32 gfpreg, sector_base_addr, sector_end_addr;
619 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
621 /* Can't read flash registers if the register set isn't mapped. */
622 if (!hw->flash_address) {
623 DEBUGOUT("ERROR: Flash registers not mapped\n");
624 return -E1000_ERR_CONFIG;
627 nvm->type = e1000_nvm_flash_sw;
629 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
631 /* sector_X_addr is a "sector"-aligned address (4096 bytes)
632 * Add 1 to sector_end_addr since this sector is included in
635 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
636 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
638 /* flash_base_addr is byte-aligned */
639 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
641 /* find total size of the NVM, then cut in half since the total
642 * size represents two separate NVM banks.
644 nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
645 << FLASH_SECTOR_ADDR_SHIFT);
646 nvm->flash_bank_size /= 2;
647 /* Adjust to word count */
648 nvm->flash_bank_size /= sizeof(u16);
650 nvm->word_size = E1000_SHADOW_RAM_WORDS;
652 /* Clear shadow ram */
653 for (i = 0; i < nvm->word_size; i++) {
654 dev_spec->shadow_ram[i].modified = false;
655 dev_spec->shadow_ram[i].value = 0xFFFF;
658 E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
659 E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
661 /* Function Pointers */
662 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
663 nvm->ops.release = e1000_release_nvm_ich8lan;
664 nvm->ops.read = e1000_read_nvm_ich8lan;
665 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
666 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
667 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
668 nvm->ops.write = e1000_write_nvm_ich8lan;
670 return E1000_SUCCESS;
674 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
675 * @hw: pointer to the HW structure
677 * Initialize family-specific MAC parameters and function
680 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
682 struct e1000_mac_info *mac = &hw->mac;
683 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
685 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
687 DEBUGFUNC("e1000_init_mac_params_ich8lan");
689 /* Set media type function pointer */
690 hw->phy.media_type = e1000_media_type_copper;
692 /* Set mta register count */
693 mac->mta_reg_count = 32;
694 /* Set rar entry count */
695 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
696 if (mac->type == e1000_ich8lan)
697 mac->rar_entry_count--;
698 /* Set if part includes ASF firmware */
699 mac->asf_firmware_present = true;
701 mac->has_fwsm = true;
702 /* ARC subsystem not supported */
703 mac->arc_subsystem_valid = false;
704 /* Adaptive IFS supported */
705 mac->adaptive_ifs = true;
707 /* Function pointers */
709 /* bus type/speed/width */
710 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
712 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
714 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
715 /* hw initialization */
716 mac->ops.init_hw = e1000_init_hw_ich8lan;
718 mac->ops.setup_link = e1000_setup_link_ich8lan;
719 /* physical interface setup */
720 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
722 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
724 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
725 /* multicast address update */
726 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
727 /* clear hardware counters */
728 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
730 /* LED and other operations */
735 /* check management mode */
736 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
738 mac->ops.id_led_init = e1000_id_led_init_generic;
740 mac->ops.blink_led = e1000_blink_led_generic;
742 mac->ops.setup_led = e1000_setup_led_generic;
744 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
745 /* turn on/off LED */
746 mac->ops.led_on = e1000_led_on_ich8lan;
747 mac->ops.led_off = e1000_led_off_ich8lan;
750 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
751 mac->ops.rar_set = e1000_rar_set_pch2lan;
754 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
755 /* multicast address update for pch2 */
756 mac->ops.update_mc_addr_list =
757 e1000_update_mc_addr_list_pch2lan;
760 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
761 /* save PCH revision_id */
762 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
763 hw->revision_id = (u8)(pci_cfg &= 0x000F);
764 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
765 /* check management mode */
766 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
768 mac->ops.id_led_init = e1000_id_led_init_pchlan;
770 mac->ops.setup_led = e1000_setup_led_pchlan;
772 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
773 /* turn on/off LED */
774 mac->ops.led_on = e1000_led_on_pchlan;
775 mac->ops.led_off = e1000_led_off_pchlan;
781 if (mac->type == e1000_pch_lpt) {
782 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
783 mac->ops.rar_set = e1000_rar_set_pch_lpt;
784 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
787 /* Enable PCS Lock-loss workaround for ICH8 */
788 if (mac->type == e1000_ich8lan)
789 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
791 return E1000_SUCCESS;
795 * __e1000_access_emi_reg_locked - Read/write EMI register
796 * @hw: pointer to the HW structure
797 * @addr: EMI address to program
798 * @data: pointer to value to read/write from/to the EMI address
799 * @read: boolean flag to indicate read or write
801 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
803 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
804 u16 *data, bool read)
808 DEBUGFUNC("__e1000_access_emi_reg_locked");
810 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
815 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
818 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
825 * e1000_read_emi_reg_locked - Read Extended Management Interface register
826 * @hw: pointer to the HW structure
827 * @addr: EMI address to program
828 * @data: value to be read from the EMI address
830 * Assumes the SW/FW/HW Semaphore is already acquired.
832 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
834 DEBUGFUNC("e1000_read_emi_reg_locked");
836 return __e1000_access_emi_reg_locked(hw, addr, data, true);
840 * e1000_write_emi_reg_locked - Write Extended Management Interface register
841 * @hw: pointer to the HW structure
842 * @addr: EMI address to program
843 * @data: value to be written to the EMI address
845 * Assumes the SW/FW/HW Semaphore is already acquired.
847 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
849 DEBUGFUNC("e1000_read_emi_reg_locked");
851 return __e1000_access_emi_reg_locked(hw, addr, &data, false);
855 * e1000_set_eee_pchlan - Enable/disable EEE support
856 * @hw: pointer to the HW structure
858 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
859 * the link and the EEE capabilities of the link partner. The LPI Control
860 * register bits will remain set only if/when link is up.
862 * EEE LPI must not be asserted earlier than one second after link is up.
863 * On 82579, EEE LPI should not be enabled until such time otherwise there
864 * can be link issues with some switches. Other devices can have EEE LPI
865 * enabled immediately upon link up since they have a timer in hardware which
866 * prevents LPI from being asserted too early.
868 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
870 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
872 u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
874 DEBUGFUNC("e1000_set_eee_pchlan");
876 switch (hw->phy.type) {
877 case e1000_phy_82579:
878 lpa = I82579_EEE_LP_ABILITY;
879 pcs_status = I82579_EEE_PCS_STATUS;
880 adv_addr = I82579_EEE_ADVERTISEMENT;
883 lpa = I217_EEE_LP_ABILITY;
884 pcs_status = I217_EEE_PCS_STATUS;
885 adv_addr = I217_EEE_ADVERTISEMENT;
888 return E1000_SUCCESS;
891 ret_val = hw->phy.ops.acquire(hw);
895 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
899 /* Clear bits that enable EEE in various speeds */
900 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
902 /* Enable EEE if not disabled by user */
903 if (!dev_spec->eee_disable) {
904 /* Save off link partner's EEE ability */
905 ret_val = e1000_read_emi_reg_locked(hw, lpa,
906 &dev_spec->eee_lp_ability);
910 /* Read EEE advertisement */
911 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
915 /* Enable EEE only for speeds in which the link partner is
916 * EEE capable and for which we advertise EEE.
918 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
919 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
921 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
922 hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
923 if (data & NWAY_LPAR_100TX_FD_CAPS)
924 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
926 /* EEE is not supported in 100Half, so ignore
927 * partner's EEE in 100 ability if full-duplex
930 dev_spec->eee_lp_ability &=
931 ~I82579_EEE_100_SUPPORTED;
935 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
936 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
940 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
942 hw->phy.ops.release(hw);
948 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
949 * @hw: pointer to the HW structure
950 * @link: link up bool flag
952 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
953 * preventing further DMA write requests. Workaround the issue by disabling
954 * the de-assertion of the clock request when in 1Gpbs mode.
955 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
956 * speeds in order to avoid Tx hangs.
958 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
960 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
961 u32 status = E1000_READ_REG(hw, E1000_STATUS);
962 s32 ret_val = E1000_SUCCESS;
965 if (link && (status & E1000_STATUS_SPEED_1000)) {
966 ret_val = hw->phy.ops.acquire(hw);
971 e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
977 e1000_write_kmrn_reg_locked(hw,
978 E1000_KMRNCTRLSTA_K1_CONFIG,
980 ~E1000_KMRNCTRLSTA_K1_ENABLE);
986 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
987 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
990 e1000_write_kmrn_reg_locked(hw,
991 E1000_KMRNCTRLSTA_K1_CONFIG,
994 hw->phy.ops.release(hw);
996 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
997 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
999 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1000 (status & E1000_STATUS_FD)))
1001 goto update_fextnvm6;
1003 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, ®);
1007 /* Clear link status transmit timeout */
1008 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1010 if (status & E1000_STATUS_SPEED_100) {
1011 /* Set inband Tx timeout to 5x10us for 100Half */
1012 reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1014 /* Do not extend the K1 entry latency for 100Half */
1015 fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1017 /* Set inband Tx timeout to 50x10us for 10Full/Half */
1019 I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1021 /* Extend the K1 entry latency for 10 Mbps */
1022 fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1025 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1030 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1036 #if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
1038 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1039 * @hw: pointer to the HW structure
1040 * @to_sx: boolean indicating a system power state transition to Sx
1042 * When link is down, configure ULP mode to significantly reduce the power
1043 * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
1044 * ME firmware to start the ULP configuration. If not on an ME enabled
1045 * system, configure the ULP mode by software.
1047 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1050 s32 ret_val = E1000_SUCCESS;
/* ULP applies only to LPT-LP (I218) parts: bail on older MACs, on the
 * I217 device IDs, or when ULP is already active.
 */
1053 if ((hw->mac.type < e1000_pch_lpt) ||
1054 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1055 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1056 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1062 /* Poll up to 5 seconds for Cable Disconnected indication */
1063 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1064 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1065 /* Bail if link is re-acquired */
1066 if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1067 return -E1000_ERR_PHY;
1074 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1075 (E1000_READ_REG(hw, E1000_FEXT) &
1076 E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
/* If firmware (ME) is present and valid, delegate the ULP
 * configuration to it via the H2ME (host-to-ME) register.
 */
1080 if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1081 /* Request ME configure ULP mode in the PHY */
1082 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1083 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1084 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
/* No ME: software performs the full ULP configuration itself,
 * under the PHY semaphore.
 */
1089 ret_val = hw->phy.ops.acquire(hw);
1093 /* During S0 Idle keep the phy in PCI-E mode */
1094 if (hw->dev_spec.ich8lan.smbus_disable)
1097 /* Force SMBus mode in PHY */
1098 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1101 phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1102 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1104 /* Force SMBus mode in MAC */
1105 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1106 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1107 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1111 /* Change the 'Link Status Change' interrupt to trigger
1112 * on 'Cable Status Change'
1114 ret_val = e1000_read_kmrn_reg_locked(hw,
1115 E1000_KMRNCTRLSTA_OP_MODES,
1119 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1120 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1124 /* Set Inband ULP Exit, Reset to SMBus mode and
1125 * Disable SMBus Release on PERST# in PHY
1127 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1130 phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1131 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
/* Only arm host wake-on-link-change if WoL link-change is enabled */
1133 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1134 phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1136 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1138 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1140 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1142 /* Set Disable SMBus Release on PERST# in MAC */
1143 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1144 mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1145 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1147 /* Commit ULP changes in PHY by starting auto ULP configuration */
1148 phy_reg |= I218_ULP_CONFIG1_START;
1149 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1152 /* Disable Tx so that the MAC doesn't send any (buffered)
1153 * packets to the PHY.
1155 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1156 mac_reg &= ~E1000_TCTL_EN;
1157 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1160 hw->phy.ops.release(hw);
1163 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
/* Record that ULP is now active so a second enable is a no-op */
1165 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1171 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1172 * @hw: pointer to the HW structure
1173 * @force: boolean indicating whether or not to force disabling ULP
1175 * Un-configure ULP mode when link is up, the system is transitioned from
1176 * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
1177 * system, poll for an indication from ME that ULP has been un-configured.
1178 * If not on an ME enabled system, un-configure the ULP mode by software.
1180 * During nominal operation, this function is called when link is acquired
1181 * to disable ULP mode (force=false); otherwise, for example when unloading
1182 * the driver or during Sx->S0 transitions, this is called with force=true
1183 * to forcibly disable ULP.
1185 * When the cable is plugged in while the device is in D0, a Cable Status
1186 * Change interrupt is generated which causes this function to be called
1187 * to partially disable ULP mode and restart autonegotiation. This function
1188 * is then called again due to the resulting Link Status Change interrupt
1189 * to finish cleaning up after the ULP flow.
1191 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1193 s32 ret_val = E1000_SUCCESS;
/* Same applicability guard as the enable path: only LPT-LP (I218)
 * parts with ULP not already off.
 */
1198 if ((hw->mac.type < e1000_pch_lpt) ||
1199 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1200 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1201 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
/* With valid ME firmware, ask ME to undo ULP via H2ME and wait for
 * its completion indication in FWSM.
 */
1204 if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1206 /* Request ME un-configure ULP mode in the PHY */
1207 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1208 mac_reg &= ~E1000_H2ME_ULP;
1209 mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1210 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1213 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1214 while (E1000_READ_REG(hw, E1000_FWSM) &
1215 E1000_FWSM_ULP_CFG_DONE) {
1217 ret_val = -E1000_ERR_PHY;
1223 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1226 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1227 mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1228 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1230 /* Clear H2ME.ULP after ME ULP configuration */
1231 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1232 mac_reg &= ~E1000_H2ME_ULP;
1233 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1235 /* Restore link speed advertisements and restart
1238 ret_val = e1000_phy_setup_autoneg(hw);
1242 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
/* No ME: software un-configures ULP itself under the PHY semaphore */
1248 ret_val = hw->phy.ops.acquire(hw);
1252 /* Revert the change to the 'Link Status Change'
1253 * interrupt to trigger on 'Cable Status Change'
1255 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1259 phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1260 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1263 /* Toggle LANPHYPC Value bit */
1264 e1000_toggle_lanphypc_pch_lpt(hw);
1266 /* Unforce SMBus mode in PHY */
1267 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1269 /* The MAC might be in PCIe mode, so temporarily force to
1270 * SMBus mode in order to access the PHY.
1272 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1273 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1274 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
/* Retry the CV_SMB_CTRL read now that SMBus mode is forced */
1278 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1283 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1284 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1286 /* Unforce SMBus mode in MAC */
1287 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1288 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1289 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1291 /* When ULP mode was previously entered, K1 was disabled by the
1292 * hardware. Re-Enable K1 in the PHY when exiting ULP.
1294 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1297 phy_reg |= HV_PM_CTRL_K1_ENABLE;
1298 e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1300 /* Clear ULP enabled configuration */
1301 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1304 /* CSC interrupt received due to ULP Indication */
1305 if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1306 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1307 I218_ULP_CONFIG1_STICKY_ULP |
1308 I218_ULP_CONFIG1_RESET_TO_SMBUS |
1309 I218_ULP_CONFIG1_WOL_HOST |
1310 I218_ULP_CONFIG1_INBAND_EXIT |
1311 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1312 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1314 /* Commit ULP changes by starting auto ULP configuration */
1315 phy_reg |= I218_ULP_CONFIG1_START;
1316 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1318 /* Clear Disable SMBus Release on PERST# in MAC */
1319 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1320 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1321 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1324 hw->phy.ops.release(hw);
/* Non-forced (CSC-driven) partial disable: restart autoneg and
 * defer the remaining cleanup to the next LSC interrupt.
 */
1326 if (hw->mac.autoneg)
1327 e1000_phy_setup_autoneg(hw);
1329 e1000_sw_lcd_config_ich8lan(hw);
1331 e1000_oem_bits_config_ich8lan(hw, true);
1333 /* Set ULP state to unknown and return non-zero to
1334 * indicate no link (yet) and re-enter on the next LSC
1335 * to finish disabling ULP flow.
1337 hw->dev_spec.ich8lan.ulp_state =
1338 e1000_ulp_state_unknown;
/* Re-enable Tx, which the enable path had turned off */
1345 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1346 mac_reg |= E1000_TCTL_EN;
1347 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1350 hw->phy.ops.release(hw);
1352 hw->phy.ops.reset(hw);
1357 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1359 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1364 #endif /* NAHUM6LP_HW && ULP_SUPPORT */
1366 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1367 * @hw: pointer to the HW structure
1369 * Checks to see if the link status of the hardware has changed. If a
1370 * change in link status has been detected, then we read the PHY registers
1371 * to get the current speed/duplex if link exists.
1373 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1375 struct e1000_mac_info *mac = &hw->mac;
1380 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1382 /* We only want to go out to the PHY registers to see if Auto-Neg
1383 * has completed and/or if our link status has changed. The
1384 * get_link_status flag is set upon receiving a Link Status
1385 * Change or Rx Sequence Error interrupt.
1387 if (!mac->get_link_status)
1388 return E1000_SUCCESS;
/* Pre-LPT parts (and I217) can always reach the PHY over MDIO, so
 * query it directly; I218 may be in ULP where the PHY is unreachable.
 */
1390 if ((hw->mac.type < e1000_pch_lpt) ||
1391 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1392 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1393 /* First we want to see if the MII Status Register reports
1394 * link. If so, then we want to get the current speed/duplex
1397 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1401 /* Check the MAC's STATUS register to determine link state
1402 * since the PHY could be inaccessible while in ULP mode.
1404 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1406 ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1408 ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1414 if (hw->mac.type == e1000_pchlan) {
1415 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1420 /* When connected at 10Mbps half-duplex, 82579 parts are excessively
1421 * aggressive resulting in many collisions. To avoid this, increase
1422 * the IPG and reduce Rx latency in the PHY.
1424 if ((hw->mac.type == e1000_pch2lan) && link) {
1426 reg = E1000_READ_REG(hw, E1000_STATUS);
/* FD and SPEED bits all clear => 10Mbps half-duplex */
1427 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1428 reg = E1000_READ_REG(hw, E1000_TIPG);
1429 reg &= ~E1000_TIPG_IPGT_MASK;
1431 E1000_WRITE_REG(hw, E1000_TIPG, reg);
1433 /* Reduce Rx latency in analog PHY */
1434 ret_val = hw->phy.ops.acquire(hw);
1438 ret_val = e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
1440 hw->phy.ops.release(hw);
1447 /* Work-around I218 hang issue */
1448 if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1449 (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
1450 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1455 /* Clear link partner's EEE ability */
1456 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1459 return E1000_SUCCESS; /* No link detected */
/* Link is up from here on: clear the pending flag, then apply
 * per-MAC-generation link-up workarounds.
 */
1461 mac->get_link_status = false;
1463 switch (hw->mac.type) {
1465 ret_val = e1000_k1_workaround_lv(hw);
1470 if (hw->phy.type == e1000_phy_82578) {
1471 ret_val = e1000_link_stall_workaround_hv(hw);
1476 /* Workaround for PCHx parts in half-duplex:
1477 * Set the number of preambles removed from the packet
1478 * when it is passed from the PHY to the MAC to prevent
1479 * the MAC from misinterpreting the packet type.
1481 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1482 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1484 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1486 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1488 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1494 /* Check if there was DownShift, must be checked
1495 * immediately after link-up
1497 e1000_check_downshift_generic(hw);
1499 /* Enable/Disable EEE after link up */
1500 if (hw->phy.type > e1000_phy_82579) {
1501 ret_val = e1000_set_eee_pchlan(hw);
1506 /* If we are forcing speed/duplex, then we simply return since
1507 * we have already determined whether we have link or not.
1510 return -E1000_ERR_CONFIG;
1512 /* Auto-Neg is enabled. Auto Speed Detection takes care
1513 * of MAC speed/duplex configuration. So we only need to
1514 * configure Collision Distance in the MAC.
1516 mac->ops.config_collision_dist(hw);
1518 /* Configure Flow Control now that Auto-Neg has completed.
1519 * First, we need to restore the desired flow control
1520 * settings because we may have had to re-autoneg with a
1521 * different link partner.
1523 ret_val = e1000_config_fc_after_link_up_generic(hw);
1525 DEBUGOUT("Error configuring flow control\n");
1531 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1532 * @hw: pointer to the HW structure
1534 * Initialize family-specific function pointers for PHY, MAC, and NVM.
1536 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1538 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
/* MAC and NVM parameter-init hooks are shared across all ICH/PCH MACs */
1540 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1541 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
/* PHY init differs by MAC generation: ICH-era parts use the ich8lan
 * variant; the remaining cases (presumably the PCH family — labels not
 * visible here, TODO confirm) use the pchlan variant.
 */
1542 switch (hw->mac.type) {
1545 case e1000_ich10lan:
1546 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1551 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1559 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1560 * @hw: pointer to the HW structure
1562 * Acquires the mutex for performing NVM operations.
1564 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1566 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
/* Serializes NVM access between callers; always succeeds once the
 * mutex is held. Paired with e1000_release_nvm_ich8lan().
 */
1568 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1570 return E1000_SUCCESS;
1574 * e1000_release_nvm_ich8lan - Release NVM mutex
1575 * @hw: pointer to the HW structure
1577 * Releases the mutex used while performing NVM operations.
1579 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1581 DEBUGFUNC("e1000_release_nvm_ich8lan");
/* Counterpart of e1000_acquire_nvm_ich8lan(): drop the NVM mutex */
1583 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1589 * e1000_acquire_swflag_ich8lan - Acquire software control flag
1590 * @hw: pointer to the HW structure
1592 * Acquires the software control flag for performing PHY and select
1595 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1597 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1598 s32 ret_val = E1000_SUCCESS;
1600 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
/* The in-process mutex is taken first; the EXTCNF_CTRL.SWFLAG bit then
 * arbitrates ownership with firmware/hardware.
 */
1602 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1605 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1606 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
/* SWFLAG never cleared within PHY_CFG_TIMEOUT: another SW agent holds it */
1614 DEBUGOUT("SW has already locked the resource.\n");
1615 ret_val = -E1000_ERR_CONFIG;
1619 timeout = SW_FLAG_TIMEOUT;
/* Request ownership by setting SWFLAG, then read back to confirm the
 * hardware actually granted it.
 */
1621 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1622 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1625 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1626 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1634 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1635 E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
/* Undo our request so FW/HW are not left blocked by a stale flag */
1636 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1637 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1638 ret_val = -E1000_ERR_CONFIG;
/* On failure the mutex is dropped; on success the caller must release
 * via e1000_release_swflag_ich8lan().
 */
1644 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1650 * e1000_release_swflag_ich8lan - Release software control flag
1651 * @hw: pointer to the HW structure
1653 * Releases the software control flag for performing PHY and select
1656 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1660 DEBUGFUNC("e1000_release_swflag_ich8lan");
1662 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
/* Only clear SWFLAG if we still own it; warn otherwise because
 * something else released it underneath us.
 */
1664 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1665 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1666 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1668 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1671 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1677 * e1000_check_mng_mode_ich8lan - Checks management mode
1678 * @hw: pointer to the HW structure
1680 * This checks if the adapter has any manageability enabled.
1681 * This is a function pointer entry point only called by read/write
1682 * routines for the PHY and NVM parts.
1684 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1688 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1690 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* True only when FW is valid AND the FWSM mode field equals the
 * iAMT management mode exactly.
 */
1692 return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
1693 ((fwsm & E1000_FWSM_MODE_MASK) ==
1694 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
1698 * e1000_check_mng_mode_pchlan - Checks management mode
1699 * @hw: pointer to the HW structure
1701 * This checks if the adapter has iAMT enabled.
1702 * This is a function pointer entry point only called by read/write
1703 * routines for the PHY and NVM parts.
1705 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1709 DEBUGFUNC("e1000_check_mng_mode_pchlan");
1711 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* Unlike the ich8lan variant, this tests the iAMT mode bit(s) with a
 * mask rather than requiring an exact mode-field match.
 */
1713 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1714 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1718 * e1000_rar_set_pch2lan - Set receive address register
1719 * @hw: pointer to the HW structure
1720 * @addr: pointer to the receive address
1721 * @index: receive address array register
1723 * Sets the receive address array register at index to the address passed
1724 * in by addr. For 82579, RAR[0] is the base address register that is to
1725 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1726 * Use SHRA[0-3] in place of those reserved for ME.
1728 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1730 u32 rar_low, rar_high;
1732 DEBUGFUNC("e1000_rar_set_pch2lan");
1734 /* HW expects these in little endian so we reverse the byte order
1735 * from network order (big endian) to little endian
1737 rar_low = ((u32) addr[0] |
1738 ((u32) addr[1] << 8) |
1739 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1741 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1743 /* If MAC address zero, no need to set the AV bit */
1744 if (rar_low || rar_high)
1745 rar_high |= E1000_RAH_AV;
/* index 0 (RAR[0]) is written directly; flush after each half so the
 * write completes before the next.
 */
1748 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1749 E1000_WRITE_FLUSH(hw);
1750 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1751 E1000_WRITE_FLUSH(hw);
1755 /* RAR[1-6] are owned by manageability. Skip those and program the
1756 * next address into the SHRA register array.
1758 if (index < (u32) (hw->mac.rar_entry_count - 6)) {
/* SHRA writes require the software control flag */
1761 ret_val = e1000_acquire_swflag_ich8lan(hw);
1765 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1766 E1000_WRITE_FLUSH(hw);
1767 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1768 E1000_WRITE_FLUSH(hw);
1770 e1000_release_swflag_ich8lan(hw);
1772 /* verify the register updates */
1773 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1774 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
/* Read-back mismatch: ME may have locked this SHRA entry */
1777 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1778 (index - 1), E1000_READ_REG(hw, E1000_FWSM))NOTE_PLACEHOLDER;
1782 DEBUGOUT1("Failed to write receive address at index %d\n", index);
1786 * e1000_rar_set_pch_lpt - Set receive address registers
1787 * @hw: pointer to the HW structure
1788 * @addr: pointer to the receive address
1789 * @index: receive address array register
1791 * Sets the receive address register array at index to the address passed
1792 * in by addr. For LPT, RAR[0] is the base address register that is to
1793 * contain the MAC address. SHRA[0-10] are the shared receive address
1794 * registers that are shared between the Host and manageability engine (ME).
1796 STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1798 u32 rar_low, rar_high;
1801 DEBUGFUNC("e1000_rar_set_pch_lpt");
1803 /* HW expects these in little endian so we reverse the byte order
1804 * from network order (big endian) to little endian
1806 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1807 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1809 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1811 /* If MAC address zero, no need to set the AV bit */
1812 if (rar_low || rar_high)
1813 rar_high |= E1000_RAH_AV;
/* index 0 (RAR[0]) is written directly with a flush between halves */
1816 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1817 E1000_WRITE_FLUSH(hw);
1818 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1819 E1000_WRITE_FLUSH(hw);
1823 /* The manageability engine (ME) can lock certain SHRAR registers that
1824 * it is using - those registers are unavailable for use.
1826 if (index < hw->mac.rar_entry_count) {
/* FWSM.WLOCK_MAC tells us how many SHRA entries ME has locked */
1827 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1828 E1000_FWSM_WLOCK_MAC_MASK;
1829 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1831 /* Check if all SHRAR registers are locked */
1835 if ((wlock_mac == 0) || (index <= wlock_mac)) {
/* SHRA writes require the software control flag */
1838 ret_val = e1000_acquire_swflag_ich8lan(hw);
1843 E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1845 E1000_WRITE_FLUSH(hw);
1846 E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1848 E1000_WRITE_FLUSH(hw);
1850 e1000_release_swflag_ich8lan(hw);
1852 /* verify the register updates */
1853 if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1854 (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1860 DEBUGOUT1("Failed to write receive address at index %d\n", index);
1863 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1865 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1866 * @hw: pointer to the HW structure
1867 * @mc_addr_list: array of multicast addresses to program
1868 * @mc_addr_count: number of multicast addresses to program
1870 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1871 * The caller must have a packed mc_addr_list of multicast addresses.
1873 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1881 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
/* Program the MAC's MTA first via the generic helper (which also
 * fills hw->mac.mta_shadow), then mirror the shadow into the PHY.
 */
1883 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1885 ret_val = hw->phy.ops.acquire(hw);
1889 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
/* Each 32-bit MTA entry is split across two 16-bit PHY registers:
 * low half to BM_MTA(i), high half to BM_MTA(i)+1.
 */
1893 for (i = 0; i < hw->mac.mta_reg_count; i++) {
1894 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1895 (u16)(hw->mac.mta_shadow[i] &
1897 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1898 (u16)((hw->mac.mta_shadow[i] >> 16) &
1902 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1905 hw->phy.ops.release(hw);
1908 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
1910 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1911 * @hw: pointer to the HW structure
1913 * Checks if firmware is blocking the reset of the PHY.
1914 * This is a function pointer entry point only called by
1917 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1920 bool blocked = false;
1923 DEBUGFUNC("e1000_check_reset_block_ich8lan");
/* FWSM.RSPCIPHY clear means firmware is blocking PHY reset; retry up
 * to 10 times before reporting the block to the caller.
 */
1926 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1927 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
1933 } while (blocked && (i++ < 10));
1934 return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
1938 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1939 * @hw: pointer to the HW structure
1941 * Assumes semaphore already acquired.
1944 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
/* SMBus address and frequency are latched from hardware straps into
 * the STRAP register; copy them into the PHY's HV_SMB_ADDR register.
 */
1947 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1948 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1949 E1000_STRAP_SMT_FREQ_SHIFT;
1952 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1954 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1958 phy_data &= ~HV_SMB_ADDR_MASK;
1959 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1960 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1962 if (hw->phy.type == e1000_phy_i217) {
1963 /* Restore SMBus frequency */
/* The two strap frequency bits map onto the PHY's LOW/HIGH
 * frequency-select bits.
 */
1965 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1966 phy_data |= (freq & (1 << 0)) <<
1967 HV_SMB_ADDR_FREQ_LOW_SHIFT;
1968 phy_data |= (freq & (1 << 1)) <<
1969 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1971 DEBUGOUT("Unsupported SMB frequency in PHY\n");
1975 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1979 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1980 * @hw: pointer to the HW structure
1982 * SW should configure the LCD from the NVM extended configuration region
1983 * as a workaround for certain parts.
1985 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1987 struct e1000_phy_info *phy = &hw->phy;
1988 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1989 s32 ret_val = E1000_SUCCESS;
1990 u16 word_addr, reg_data, reg_addr, phy_page = 0;
1992 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1994 /* Initialize the PHY from the NVM on ICH platforms. This
1995 * is needed due to an issue where the NVM configuration is
1996 * not properly autoloaded after power transitions.
1997 * Therefore, after each PHY reset, we will load the
1998 * configuration data out of the NVM manually.
/* Select which FEXTNVM "SW config" bit applies for this MAC/PHY */
2000 switch (hw->mac.type) {
2002 if (phy->type != e1000_phy_igp_3)
2005 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2006 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2007 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2014 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2020 ret_val = hw->phy.ops.acquire(hw);
/* Nothing to do unless NVM says SW is responsible for LCD config */
2024 data = E1000_READ_REG(hw, E1000_FEXTNVM);
2025 if (!(data & sw_cfg_mask))
2028 /* Make sure HW does not configure LCD from PHY
2029 * extended configuration before SW configuration
2031 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2032 if ((hw->mac.type < e1000_pch2lan) &&
2033 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
/* Locate the extended configuration region in NVM: size and base
 * pointer come from EXTCNF_SIZE / EXTCNF_CTRL.
 */
2036 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2037 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2038 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2042 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2043 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2045 if (((hw->mac.type == e1000_pchlan) &&
2046 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2047 (hw->mac.type > e1000_pchlan)) {
2048 /* HW configures the SMBus address and LEDs when the
2049 * OEM and LCD Write Enable bits are set in the NVM.
2050 * When both NVM bits are cleared, SW will configure
2053 ret_val = e1000_write_smbus_addr(hw);
/* Mirror the MAC's LEDCTL value into the PHY LED config */
2057 data = E1000_READ_REG(hw, E1000_LEDCTL);
2058 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2064 /* Configure LCD from extended configuration region. */
2066 /* cnf_base_addr is in DWORD */
2067 word_addr = (u16)(cnf_base_addr << 1);
/* Each config entry is a (data, address) word pair in NVM */
2069 for (i = 0; i < cnf_size; i++) {
2070 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2075 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2080 /* Save off the PHY page for future writes. */
2081 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2082 phy_page = reg_data;
/* Combine saved page with the in-page register offset */
2086 reg_addr &= PHY_REG_MASK;
2087 reg_addr |= phy_page;
2089 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2096 hw->phy.ops.release(hw);
2101 * e1000_k1_gig_workaround_hv - K1 Si workaround
2102 * @hw: pointer to the HW structure
2103 * @link: link up bool flag
2105 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2106 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
2107 * If link is down, the function will restore the default K1 setting located
2110 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2112 s32 ret_val = E1000_SUCCESS;
/* Default K1 setting comes from NVM; it is overridden below only when
 * link is resolved at 1Gbps.
 */
2114 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2116 DEBUGFUNC("e1000_k1_gig_workaround_hv");
2118 if (hw->mac.type != e1000_pchlan)
2119 return E1000_SUCCESS;
2121 /* Wrap the whole flow with the sw flag */
2122 ret_val = hw->phy.ops.acquire(hw);
2126 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
/* 82578 reports speed/resolution in BM_CS_STATUS */
2128 if (hw->phy.type == e1000_phy_82578) {
2129 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2134 status_reg &= (BM_CS_STATUS_LINK_UP |
2135 BM_CS_STATUS_RESOLVED |
2136 BM_CS_STATUS_SPEED_MASK);
2138 if (status_reg == (BM_CS_STATUS_LINK_UP |
2139 BM_CS_STATUS_RESOLVED |
2140 BM_CS_STATUS_SPEED_1000))
/* 82577 reports the same information in HV_M_STATUS */
2144 if (hw->phy.type == e1000_phy_82577) {
2145 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2150 status_reg &= (HV_M_STATUS_LINK_UP |
2151 HV_M_STATUS_AUTONEG_COMPLETE |
2152 HV_M_STATUS_SPEED_MASK);
2154 if (status_reg == (HV_M_STATUS_LINK_UP |
2155 HV_M_STATUS_AUTONEG_COMPLETE |
2156 HV_M_STATUS_SPEED_1000))
2160 /* Link stall fix for link up */
2161 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2167 /* Link stall fix for link down */
2168 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
/* Apply the final K1 decision to the hardware */
2174 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2177 hw->phy.ops.release(hw);
2183 * e1000_configure_k1_ich8lan - Configure K1 power state
2184 * @hw: pointer to the HW structure
2185 * @enable: K1 state to configure
2187 * Configure the K1 power state based on the provided parameter.
2188 * Assumes semaphore already acquired.
2190 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2192 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2200 DEBUGFUNC("e1000_configure_k1_ich8lan");
/* K1 on/off lives in the KMRN K1_CONFIG register */
2202 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2208 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2210 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2212 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
/* Save CTRL/CTRL_EXT, briefly force speed with speed-bypass set so the
 * new K1 setting takes effect, then restore the original values.
 */
2218 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2219 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2221 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2222 reg |= E1000_CTRL_FRCSPD;
2223 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2225 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2226 E1000_WRITE_FLUSH(hw);
2228 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2229 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2230 E1000_WRITE_FLUSH(hw);
2233 return E1000_SUCCESS;
2237 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2238 * @hw: pointer to the HW structure
2239 * @d0_state: boolean if entering d0 or d3 device state
2241 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2242 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
2243 * in NVM determines whether HW should configure LPLU and Gbe Disable.
2245 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2251 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2253 if (hw->mac.type < e1000_pchlan)
2256 ret_val = hw->phy.ops.acquire(hw)NOTE_PLACEHOLDER;
/* On PCH, skip SW config when NVM has the OEM Write Enable set or
 * the ICH8M SW-config bit is clear (HW handles it then).
 */
2260 if (hw->mac.type == e1000_pchlan) {
2261 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2262 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2266 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2267 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
/* Translate PHY_CTRL (MAC) Gbe-disable/LPLU bits into the PHY's
 * HV_OEM_BITS equivalents; D0 vs non-D0 use different source bits.
 */
2270 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2272 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2276 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2279 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2280 oem_reg |= HV_OEM_BITS_GBE_DIS;
2282 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2283 oem_reg |= HV_OEM_BITS_LPLU;
2285 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2286 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2287 oem_reg |= HV_OEM_BITS_GBE_DIS;
2289 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2290 E1000_PHY_CTRL_NOND0A_LPLU))
2291 oem_reg |= HV_OEM_BITS_LPLU;
2294 /* Set Restart auto-neg to activate the bits */
2295 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2296 !hw->phy.ops.check_reset_block(hw))
2297 oem_reg |= HV_OEM_BITS_RESTART_AN;
2299 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2302 hw->phy.ops.release(hw);
2309 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2310 * @hw: pointer to the HW structure
2312 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2317 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
/* Read-modify-write of HV_KMRN_MODE_CTRL to set the slow-MDIO bit */
2319 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2323 data |= HV_KMRN_MDIO_SLOW;
2325 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2331 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2332 * done after every PHY reset.
2334 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2336 s32 ret_val = E1000_SUCCESS;
2339 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2341 if (hw->mac.type != e1000_pchlan)
2342 return E1000_SUCCESS;
2344 /* Set MDIO slow mode before any other MDIO access */
2345 if (hw->phy.type == e1000_phy_82577) {
2346 ret_val = e1000_set_mdio_slow_mode_hv(hw);
/* Early-silicon-revision-only workarounds (82577 rev 1/2, 82578 rev 1) */
2351 if (((hw->phy.type == e1000_phy_82577) &&
2352 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2353 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2354 /* Disable generation of early preamble */
2355 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2359 /* Preamble tuning for SSC */
2360 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2366 if (hw->phy.type == e1000_phy_82578) {
2367 /* Return registers to default by doing a soft reset then
2368 * writing 0x3140 to the control register.
2370 if (hw->phy.revision < 2) {
2371 e1000_phy_sw_reset_generic(hw);
2372 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
/* Reset page select to page 0 with a raw MDIC write, under the
 * PHY semaphore.
 */
2378 ret_val = hw->phy.ops.acquire(hw);
2383 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2384 hw->phy.ops.release(hw);
2388 /* Configure the K1 Si workaround during phy reset assuming there is
2389 * link so that it disables K1 if link is in 1Gbps.
2391 ret_val = e1000_k1_gig_workaround_hv(hw, true);
2395 /* Workaround for link disconnects on a busy hub in half duplex */
2396 ret_val = hw->phy.ops.acquire(hw);
2399 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2402 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2407 /* set MSE higher to enable link to stay up when noise is high */
2408 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2410 hw->phy.ops.release(hw);
2416 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2417 * @hw: pointer to the HW structure
/* NOTE(review): line-numbered extract with dropped lines; declarations,
 * error checks and closing braces are not visible.  Code kept byte-identical.
 * Visible behavior: under the PHY lock and with BM wakeup-register access
 * enabled, mirror each MAC RAL/RAH receive-address register pair into the
 * corresponding PHY BM_RAR_{L,M,H,CTRL} registers.
 */
2419 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2425 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2427 ret_val = hw->phy.ops.acquire(hw);
2430 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2434 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2435 for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2436 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2437 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2438 (u16)(mac_reg & 0xFFFF));
2439 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2440 (u16)((mac_reg >> 16) & 0xFFFF));
2442 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2443 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2444 (u16)(mac_reg & 0xFFFF));
/* Only the address-valid bit of RAH is propagated to BM_RAR_CTRL; the
 * shift/trailer of this expression is on a dropped line (2447+). */
2445 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2446 (u16)((mac_reg & E1000_RAH_AV)
2450 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2453 hw->phy.ops.release(hw);
2456 #ifndef CRC32_OS_SUPPORT
/* Software CRC-32 (reflected polynomial 0xEDB88320, per 802.3) over the
 * 6-byte destination MAC address; used only when the OS does not provide
 * a CRC32 service (see the #else branch at the call site).
 * NOTE(review): dropped lines hide the crc initialization (2463-2464?),
 * the per-byte XOR (2466), and the final return (2470+) — kept as-is.
 */
2457 static u32 e1000_calc_rx_da_crc(u8 mac[])
2459 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
2460 u32 i, j, mask, crc;
2462 DEBUGFUNC("e1000_calc_rx_da_crc");
2465 for (i = 0; i < 6; i++) {
2467 for (j = 8; j > 0; j--) {
/* mask is all-ones when the low crc bit is set, else zero — standard
 * branch-free bitwise CRC step. */
2468 mask = (crc & 1) * (-1);
2469 crc = (crc >> 1) ^ (poly & mask);
2475 #endif /* CRC32_OS_SUPPORT */
2477 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2479 * @hw: pointer to the HW structure
2480 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
/* NOTE(review): line-numbered extract with many dropped lines (the
 * if (enable) / else split, error-check returns, several register value
 * arguments).  Code kept byte-identical.
 * Visible behavior (pch2lan+ only): gate the Rx path via PHY_REG(769,20)
 * bit 14, program per-RAR CRC seeds into PCH_RAICC, mirror Rx addresses to
 * the PHY, then either apply or revert jumbo-frame settings in FFLT_DBG,
 * RCTL (strip CRC), the KMRN CTRL/HD_CTRL registers and several PHY
 * registers, finally re-enabling the Rx path.
 */
2482 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2484 s32 ret_val = E1000_SUCCESS;
2489 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2491 if (hw->mac.type < e1000_pch2lan)
2492 return E1000_SUCCESS;
2494 /* disable Rx path while enabling/disabling workaround */
2495 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2496 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2497 phy_reg | (1 << 14));
2502 /* Write Rx addresses (rar_entry_count for RAL/H, and
2503 * SHRAL/H) and initial CRC values to the MAC
2505 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2506 u8 mac_addr[ETH_ADDR_LEN] = {0};
2507 u32 addr_high, addr_low;
2509 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
/* Skip RAR entries not marked address-valid (continue on dropped line). */
2510 if (!(addr_high & E1000_RAH_AV))
2512 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2513 mac_addr[0] = (addr_low & 0xFF);
2514 mac_addr[1] = ((addr_low >> 8) & 0xFF);
2515 mac_addr[2] = ((addr_low >> 16) & 0xFF);
2516 mac_addr[3] = ((addr_low >> 24) & 0xFF);
2517 mac_addr[4] = (addr_high & 0xFF);
2518 mac_addr[5] = ((addr_high >> 8) & 0xFF);
2520 #ifndef CRC32_OS_SUPPORT
2521 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2522 e1000_calc_rx_da_crc(mac_addr));
2523 #else /* CRC32_OS_SUPPORT */
2524 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2525 E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2526 #endif /* CRC32_OS_SUPPORT */
2529 /* Write Rx addresses to the PHY */
2530 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
/* --- presumably the enable branch starts here (if (enable) on a dropped
 * line) — TODO confirm against the full source --- */
2532 /* Enable jumbo frame workaround in the MAC */
2533 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2534 mac_reg &= ~(1 << 14);
2535 mac_reg |= (7 << 15);
2536 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2538 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2539 mac_reg |= E1000_RCTL_SECRC;
2540 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2542 ret_val = e1000_read_kmrn_reg_generic(hw,
2543 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2547 ret_val = e1000_write_kmrn_reg_generic(hw,
2548 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2552 ret_val = e1000_read_kmrn_reg_generic(hw,
2553 E1000_KMRNCTRLSTA_HD_CTRL,
2557 data &= ~(0xF << 8);
2559 ret_val = e1000_write_kmrn_reg_generic(hw,
2560 E1000_KMRNCTRLSTA_HD_CTRL,
2565 /* Enable jumbo frame workaround in the PHY */
2566 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2567 data &= ~(0x7F << 5);
2568 data |= (0x37 << 5);
2569 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2572 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2574 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2577 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2578 data &= ~(0x3FF << 2);
2579 data |= (0x1A << 2);
2580 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2583 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2586 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2587 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
/* --- presumably the disable (else) branch — reverts the writes above --- */
2592 /* Write MAC register values back to h/w defaults */
2593 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2594 mac_reg &= ~(0xF << 14);
2595 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2597 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2598 mac_reg &= ~E1000_RCTL_SECRC;
2599 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2601 ret_val = e1000_read_kmrn_reg_generic(hw,
2602 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2606 ret_val = e1000_write_kmrn_reg_generic(hw,
2607 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2611 ret_val = e1000_read_kmrn_reg_generic(hw,
2612 E1000_KMRNCTRLSTA_HD_CTRL,
2616 data &= ~(0xF << 8);
2618 ret_val = e1000_write_kmrn_reg_generic(hw,
2619 E1000_KMRNCTRLSTA_HD_CTRL,
2624 /* Write PHY register values back to h/w defaults */
2625 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2626 data &= ~(0x7F << 5);
2627 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2630 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2632 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2635 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2636 data &= ~(0x3FF << 2);
2638 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2641 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2644 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2645 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2651 /* re-enable Rx path after enabling/disabling workaround */
2652 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2657 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2658 * done after every PHY reset.
/* NOTE(review): line-numbered extract; error-check lines between the
 * statements are dropped.  Code kept byte-identical.
 * Visible behavior (pch2lan/82579 only): force MDIO slow mode, then under
 * the PHY lock raise the MSE threshold and set the MSE link-down count
 * via EMI writes.
 */
2660 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2662 s32 ret_val = E1000_SUCCESS;
2664 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2666 if (hw->mac.type != e1000_pch2lan)
2667 return E1000_SUCCESS;
2669 /* Set MDIO slow mode before any other MDIO access */
2670 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2674 ret_val = hw->phy.ops.acquire(hw);
2677 /* set MSE higher to enable link to stay up when noise is high */
2678 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2681 /* drop link after 5 times MSE threshold was reached */
2682 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2684 hw->phy.ops.release(hw);
2690 * e1000_k1_gig_workaround_lv - K1 Si workaround
2691 * @hw: pointer to the HW structure
2693 * Workaround to set the K1 beacon duration for 82579 parts
/* NOTE(review): the doc-comment name above ("..._gig_...") does not match
 * the function name below — that mismatch exists in the upstream source
 * too, so it is left as-is.  Line-numbered extract; error checks and
 * some braces are on dropped lines.  Code kept byte-identical.
 * Visible behavior (pch2lan only): once link is up and autoneg complete,
 * pick an 8us (1Gbps) or 16us (otherwise) K1 beacon duration in FEXTNVM4
 * and adjust the LPI-control PLL-lock-count bit; at 1Gbps also clear the
 * PLL-stop-in-K1 bit in HV_PM_CTRL (packet-drop workaround).
 */
2695 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2697 s32 ret_val = E1000_SUCCESS;
2702 DEBUGFUNC("e1000_k1_workaround_lv");
2704 if (hw->mac.type != e1000_pch2lan)
2705 return E1000_SUCCESS;
2707 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
2708 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2712 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2713 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2714 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2715 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2717 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
2721 if (status_reg & HV_M_STATUS_SPEED_1000) {
2724 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
2725 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2726 /* LV 1G Packet drop issue wa */
2727 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2731 pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2732 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
/* else branch (non-1Gbps): 16us beacon, force PLL lock count. */
2737 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2738 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2740 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2741 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
2748 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2749 * @hw: pointer to the HW structure
2750 * @gate: boolean set to true to gate, false to ungate
2752 * Gate/ungate the automatic PHY configuration via hardware; perform
2753 * the configuration via software instead.
/* NOTE(review): line-numbered extract; the early-return for pre-pch2lan
 * parts (2762?) and the if/else around the two assignments are on dropped
 * lines.  Code kept byte-identical: sets or clears GATE_PHY_CFG in
 * EXTCNF_CTRL depending on @gate.
 */
2755 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2759 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2761 if (hw->mac.type < e1000_pch2lan)
2764 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2767 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2769 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2771 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2775 * e1000_lan_init_done_ich8lan - Check for PHY config completion
2776 * @hw: pointer to the HW structure
2778 * Check the appropriate indication the MAC has finished configuring the
2779 * PHY after a software reset.
/* NOTE(review): line-numbered extract; the do { and usec_delay lines of
 * the poll loop and the if (!data) guard before the DEBUGOUT are dropped.
 * Code kept byte-identical: poll STATUS.LAN_INIT_DONE up to
 * E1000_ICH8_LAN_INIT_TIMEOUT iterations, warn on timeout, then clear the
 * bit so the next init event starts clean.
 */
2781 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2783 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2785 DEBUGFUNC("e1000_lan_init_done_ich8lan");
2787 /* Wait for basic configuration completes before proceeding */
2789 data = E1000_READ_REG(hw, E1000_STATUS);
2790 data &= E1000_STATUS_LAN_INIT_DONE;
2792 } while ((!data) && --loop);
2794 /* If basic configuration is incomplete before the above loop
2795 * count reaches 0, loading the configuration from NVM will
2796 * leave the PHY in a bad state possibly resulting in no link.
2799 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2801 /* Clear the Init Done bit for the next init event */
2802 data = E1000_READ_REG(hw, E1000_STATUS);
2803 data &= ~E1000_STATUS_LAN_INIT_DONE;
2804 E1000_WRITE_REG(hw, E1000_STATUS, data);
2808 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2809 * @hw: pointer to the HW structure
/* NOTE(review): line-numbered extract; switch case labels, error checks
 * and braces sit on dropped lines.  The only code change here is fixing
 * mojibake on embedded line 2842: "®" is an HTML-entity corruption of
 * "&reg" (the out-pointer for read_reg), proven by "reg" being modified
 * and written back on the two following lines.
 * Visible behavior: skip if reset is blocked; run the hv/lv per-MAC
 * workarounds; clear BM_WUC_HOST_WU_BIT on pchlan+; reconfigure the LCD
 * from NVM and OEM bits; on non-managed 82579 ungate auto PHY config and
 * set the EEE LPI update timer via a locked EMI write.
 */
2811 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2813 s32 ret_val = E1000_SUCCESS;
2816 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2818 if (hw->phy.ops.check_reset_block(hw))
2819 return E1000_SUCCESS;
2821 /* Allow time for h/w to get to quiescent state after reset */
2824 /* Perform any necessary post-reset workarounds */
2825 switch (hw->mac.type) {
2827 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2832 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2840 /* Clear the host wakeup bit after lcd reset */
2841 if (hw->mac.type >= e1000_pchlan) {
2842 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2843 reg &= ~BM_WUC_HOST_WU_BIT;
2844 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2847 /* Configure the LCD with the extended configuration region in NVM */
2848 ret_val = e1000_sw_lcd_config_ich8lan(hw);
2852 /* Configure the LCD with the OEM bits in NVM */
2853 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2855 if (hw->mac.type == e1000_pch2lan) {
2856 /* Ungate automatic PHY configuration on non-managed 82579 */
2857 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2858 E1000_ICH_FWSM_FW_VALID)) {
2860 e1000_gate_hw_phy_config_ich8lan(hw, false);
2863 /* Set EEE LPI Update Timer to 200usec */
2864 ret_val = hw->phy.ops.acquire(hw);
2867 ret_val = e1000_write_emi_reg_locked(hw,
2868 I82579_LPI_UPDATE_TIMER,
2870 hw->phy.ops.release(hw);
2877 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2878 * @hw: pointer to the HW structure
2881 * This is a function pointer entry point called by drivers
2882 * or other shared routines.
/* NOTE(review): line-numbered extract; the error check after the generic
 * reset (2896-2898?) is on dropped lines.  Code kept byte-identical:
 * gate auto PHY config on non-managed 82579, perform the generic PHY
 * hardware reset, then run the ich8lan post-reset sequence.
 */
2884 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2886 s32 ret_val = E1000_SUCCESS;
2888 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2890 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2891 if ((hw->mac.type == e1000_pch2lan) &&
2892 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2893 e1000_gate_hw_phy_config_ich8lan(hw, true);
2895 ret_val = e1000_phy_hw_reset_generic(hw);
2899 return e1000_post_phy_reset_ich8lan(hw);
2903 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2904 * @hw: pointer to the HW structure
2905 * @active: true to enable LPLU, false to disable
2907 * Sets the LPLU state according to the active flag. For PCH, if OEM write
2908 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2909 * the phy speed. This function will manually set the LPLU bit and restart
2910 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
2911 * since it configures the same bit.
/* NOTE(review): line-numbered extract; the if (active) / else around the
 * two oem_reg assignments and the read error check are on dropped lines.
 * Code kept byte-identical: read-modify-write HV_OEM_BITS, toggling LPLU
 * per @active and requesting an autoneg restart unless reset is blocked.
 */
2913 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2918 DEBUGFUNC("e1000_set_lplu_state_pchlan");
2920 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2925 oem_reg |= HV_OEM_BITS_LPLU;
2927 oem_reg &= ~HV_OEM_BITS_LPLU;
2929 if (!hw->phy.ops.check_reset_block(hw))
2930 oem_reg |= HV_OEM_BITS_RESTART_AN;
2932 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2936 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2937 * @hw: pointer to the HW structure
2938 * @active: true to enable LPLU, false to disable
2940 * Sets the LPLU D0 state according to the active flag. When
2941 * activating LPLU this function also disables smart speed
2942 * and vice versa. LPLU will not be activated unless the
2943 * device autonegotiation advertisement meets standards of
2944 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2945 * This is a function pointer entry point only called by
2946 * PHY setup routines.
/* NOTE(review): line-numbered extract; the if (active) / else structure,
 * error checks and closing braces are on dropped lines.  Code kept
 * byte-identical.  Visible behavior: no-op for IFE PHYs; set/clear
 * D0A_LPLU in the PHY_CTRL MAC register, and for igp_3 PHYs toggle
 * IGP01E1000_PSCFR_SMART_SPEED inversely to LPLU (with the ich8lan gig
 * downshift workaround applied before PHY access when enabling).
 */
2948 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2950 struct e1000_phy_info *phy = &hw->phy;
2952 s32 ret_val = E1000_SUCCESS;
2955 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2957 if (phy->type == e1000_phy_ife)
2958 return E1000_SUCCESS;
2960 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2963 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2964 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2966 if (phy->type != e1000_phy_igp_3)
2967 return E1000_SUCCESS;
2969 /* Call gig speed drop workaround on LPLU before accessing
2972 if (hw->mac.type == e1000_ich8lan)
2973 e1000_gig_downshift_workaround_ich8lan(hw);
2975 /* When LPLU is enabled, we should disable SmartSpeed */
2976 ret_val = phy->ops.read_reg(hw,
2977 IGP01E1000_PHY_PORT_CONFIG,
2981 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2982 ret_val = phy->ops.write_reg(hw,
2983 IGP01E1000_PHY_PORT_CONFIG,
/* --- else branch (LPLU off): clear D0A_LPLU, then restore SmartSpeed
 * per phy->smart_speed policy --- */
2988 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2989 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2991 if (phy->type != e1000_phy_igp_3)
2992 return E1000_SUCCESS;
2994 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
2995 * during Dx states where the power conservation is most
2996 * important. During driver activity we should enable
2997 * SmartSpeed, so performance is maintained.
2999 if (phy->smart_speed == e1000_smart_speed_on) {
3000 ret_val = phy->ops.read_reg(hw,
3001 IGP01E1000_PHY_PORT_CONFIG,
3006 data |= IGP01E1000_PSCFR_SMART_SPEED;
3007 ret_val = phy->ops.write_reg(hw,
3008 IGP01E1000_PHY_PORT_CONFIG,
3012 } else if (phy->smart_speed == e1000_smart_speed_off) {
3013 ret_val = phy->ops.read_reg(hw,
3014 IGP01E1000_PHY_PORT_CONFIG,
3019 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3020 ret_val = phy->ops.write_reg(hw,
3021 IGP01E1000_PHY_PORT_CONFIG,
3028 return E1000_SUCCESS;
3032 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3033 * @hw: pointer to the HW structure
3034 * @active: true to enable LPLU, false to disable
3036 * Sets the LPLU D3 state according to the active flag. When
3037 * activating LPLU this function also disables smart speed
3038 * and vice versa. LPLU will not be activated unless the
3039 * device autonegotiation advertisement meets standards of
3040 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3041 * This is a function pointer entry point only called by
3042 * PHY setup routines.
/* NOTE(review): line-numbered extract; the if (!active) branch header,
 * error checks and braces are on dropped lines.  Code kept byte-identical.
 * Mirror image of the D0 variant: clears NOND0A_LPLU and restores
 * SmartSpeed when deactivating; when activating (and the advertisement
 * is 10/100-compatible) sets NOND0A_LPLU and disables SmartSpeed for
 * igp_3 PHYs, with the ich8lan gig downshift workaround first.
 */
3044 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3046 struct e1000_phy_info *phy = &hw->phy;
3048 s32 ret_val = E1000_SUCCESS;
3051 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3053 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3056 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3057 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3059 if (phy->type != e1000_phy_igp_3)
3060 return E1000_SUCCESS;
3062 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3063 * during Dx states where the power conservation is most
3064 * important. During driver activity we should enable
3065 * SmartSpeed, so performance is maintained.
3067 if (phy->smart_speed == e1000_smart_speed_on) {
3068 ret_val = phy->ops.read_reg(hw,
3069 IGP01E1000_PHY_PORT_CONFIG,
3074 data |= IGP01E1000_PSCFR_SMART_SPEED;
3075 ret_val = phy->ops.write_reg(hw,
3076 IGP01E1000_PHY_PORT_CONFIG,
3080 } else if (phy->smart_speed == e1000_smart_speed_off) {
3081 ret_val = phy->ops.read_reg(hw,
3082 IGP01E1000_PHY_PORT_CONFIG,
3087 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3088 ret_val = phy->ops.write_reg(hw,
3089 IGP01E1000_PHY_PORT_CONFIG,
/* Activation path: only engage LPLU when only 10/100 (or all) speeds are
 * advertised, matching the doc-comment contract above. */
3094 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3095 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3096 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3097 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3098 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3100 if (phy->type != e1000_phy_igp_3)
3101 return E1000_SUCCESS;
3103 /* Call gig speed drop workaround on LPLU before accessing
3106 if (hw->mac.type == e1000_ich8lan)
3107 e1000_gig_downshift_workaround_ich8lan(hw);
3109 /* When LPLU is enabled, we should disable SmartSpeed */
3110 ret_val = phy->ops.read_reg(hw,
3111 IGP01E1000_PHY_PORT_CONFIG,
3116 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3117 ret_val = phy->ops.write_reg(hw,
3118 IGP01E1000_PHY_PORT_CONFIG,
3126 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3127 * @hw: pointer to the HW structure
3128 * @bank: pointer to the variable that returns the active bank
3130 * Reads signature byte from the NVM using the flash access registers.
3131 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
/* NOTE(review): line-numbered extract; switch case labels, *bank
 * assignments and some braces are on dropped lines.  Code kept
 * byte-identical.  Visible behavior: newer MACs first try the EECD
 * SEC1VAL bits; otherwise read the signature byte of each bank from
 * flash (bank 1 at flash_bank_size-in-bytes offset) and return the one
 * matching E1000_ICH_NVM_SIG_VALUE, else -E1000_ERR_NVM.
 */
3133 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3136 struct e1000_nvm_info *nvm = &hw->nvm;
3137 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3138 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3142 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3144 switch (hw->mac.type) {
3147 eecd = E1000_READ_REG(hw, E1000_EECD);
3148 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3149 E1000_EECD_SEC1VAL_VALID_MASK) {
3150 if (eecd & E1000_EECD_SEC1VAL)
3155 return E1000_SUCCESS;
3157 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3160 /* set bank to 0 in case flash read fails */
3164 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3168 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3169 E1000_ICH_NVM_SIG_VALUE) {
3171 return E1000_SUCCESS;
3175 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3180 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3181 E1000_ICH_NVM_SIG_VALUE) {
3183 return E1000_SUCCESS;
3186 DEBUGOUT("ERROR: No valid NVM bank present\n");
3187 return -E1000_ERR_NVM;
3192 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
3193 * @hw: pointer to the HW structure
3194 * @offset: The offset (in bytes) of the word(s) to read.
3195 * @words: Size of data to read in words
3196 * @data: Pointer to the word(s) to read at offset.
3198 * Reads a word(s) from the NVM using the flash access registers.
/* NOTE(review): line-numbered extract; the words==0 clause of the bounds
 * check, goto labels/out path and loop error break are on dropped lines.
 * Code kept byte-identical.  Visible behavior: bounds-check, take the NVM
 * lock, detect the active bank (default 0 on failure), then per word
 * serve modified entries from the shadow RAM, otherwise read from flash.
 */
3200 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3203 struct e1000_nvm_info *nvm = &hw->nvm;
3204 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3206 s32 ret_val = E1000_SUCCESS;
3210 DEBUGFUNC("e1000_read_nvm_ich8lan");
3212 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3214 DEBUGOUT("nvm parameter(s) out of bounds\n");
3215 ret_val = -E1000_ERR_NVM;
3219 nvm->ops.acquire(hw);
3221 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3222 if (ret_val != E1000_SUCCESS) {
3223 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* act_offset is a word offset here; bank 1 starts one flash_bank_size in. */
3227 act_offset = (bank) ? nvm->flash_bank_size : 0;
3228 act_offset += offset;
3230 ret_val = E1000_SUCCESS;
3231 for (i = 0; i < words; i++) {
3232 if (dev_spec->shadow_ram[offset+i].modified) {
3233 data[i] = dev_spec->shadow_ram[offset+i].value;
3235 ret_val = e1000_read_flash_word_ich8lan(hw,
3244 nvm->ops.release(hw);
3248 DEBUGOUT1("NVM read error: %d\n", ret_val);
3254 * e1000_flash_cycle_init_ich8lan - Initialize flash
3255 * @hw: pointer to the HW structure
3257 * This function does initial flash setup so that a new read/write/erase cycle
/* NOTE(review): line-numbered extract; the else branch header, the loop
 * delay, and the final return are on dropped lines.  Code kept
 * byte-identical.  Visible behavior: validate the flash descriptor,
 * clear FCERR/DAEL by writing 1s, then either claim the controller
 * immediately (no cycle in progress) or poll FLCINPROG up to
 * ICH_FLASH_READ_COMMAND_TIMEOUT before setting FLCDONE.
 */
3260 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3262 union ich8_hws_flash_status hsfsts;
3263 s32 ret_val = -E1000_ERR_NVM;
3265 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3267 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3269 /* Check if the flash descriptor is valid */
3270 if (!hsfsts.hsf_status.fldesvalid) {
3271 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
3272 return -E1000_ERR_NVM;
3275 /* Clear FCERR and DAEL in hw status by writing 1 */
3276 hsfsts.hsf_status.flcerr = 1;
3277 hsfsts.hsf_status.dael = 1;
3279 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3281 /* Either we should have a hardware SPI cycle in progress
3282 * bit to check against, in order to start a new cycle or
3283 * FDONE bit should be changed in the hardware so that it
3284 * is 1 after hardware reset, which can then be used as an
3285 * indication whether a cycle is in progress or has been
3289 if (!hsfsts.hsf_status.flcinprog) {
3290 /* There is no cycle running at present,
3291 * so we can start a cycle.
3292 * Begin by setting Flash Cycle Done.
3294 hsfsts.hsf_status.flcdone = 1;
3295 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3296 ret_val = E1000_SUCCESS;
3300 /* Otherwise poll for sometime so the current
3301 * cycle has a chance to end before giving up.
3303 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3304 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3306 if (!hsfsts.hsf_status.flcinprog) {
3307 ret_val = E1000_SUCCESS;
3312 if (ret_val == E1000_SUCCESS) {
3313 /* Successful in waiting for previous cycle to timeout,
3314 * now set the Flash Cycle Done.
3316 hsfsts.hsf_status.flcdone = 1;
3317 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3320 DEBUGOUT("Flash controller busy, cannot get access\n");
3328 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3329 * @hw: pointer to the HW structure
3330 * @timeout: maximum time to wait for completion
3332 * This function starts a flash cycle and waits for its completion.
/* NOTE(review): line-numbered extract; the do { opener and per-iteration
 * delay of the poll loop are on dropped lines.  Code kept byte-identical:
 * set FLCGO in HSFCTL, poll HSFSTS.FLCDONE up to @timeout iterations,
 * and succeed only if the cycle finished without FLCERR.
 */
3334 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3336 union ich8_hws_flash_ctrl hsflctl;
3337 union ich8_hws_flash_status hsfsts;
3340 DEBUGFUNC("e1000_flash_cycle_ich8lan");
3342 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3343 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3344 hsflctl.hsf_ctrl.flcgo = 1;
3345 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3347 /* wait till FDONE bit is set to 1 */
3349 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3350 if (hsfsts.hsf_status.flcdone)
3353 } while (i++ < timeout);
3355 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3356 return E1000_SUCCESS;
3358 return -E1000_ERR_NVM;
3362 * e1000_read_flash_word_ich8lan - Read word from flash
3363 * @hw: pointer to the HW structure
3364 * @offset: offset to data location
3365 * @data: pointer to the location for storing the data
3367 * Reads the flash word at offset into data. Offset is converted
3368 * to bytes before read.
/* NOTE(review): line-numbered extract; the parameter-list tail, the
 * !data guard condition (3375?) and the offset <<= 1 conversion (3379?)
 * are on dropped lines.  Code kept byte-identical: thin wrapper over
 * e1000_read_flash_data_ich8lan with size 2.
 */
3370 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3373 DEBUGFUNC("e1000_read_flash_word_ich8lan");
3376 return -E1000_ERR_NVM;
3378 /* Must convert offset into bytes. */
3381 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3385 * e1000_read_flash_byte_ich8lan - Read byte from flash
3386 * @hw: pointer to the HW structure
3387 * @offset: The offset of the byte to read.
3388 * @data: Pointer to a byte to store the value read.
3390 * Reads a single byte from the NVM using the flash access registers.
/* NOTE(review): line-numbered extract; the parameter-list tail, error
 * check and the *data = (u8)word narrowing (3400-3402?) are on dropped
 * lines.  Code kept byte-identical: reads one byte via
 * e1000_read_flash_data_ich8lan with size 1.
 */
3392 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3398 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3404 return E1000_SUCCESS;
3408 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
3409 * @hw: pointer to the HW structure
3410 * @offset: The offset (in bytes) of the byte or word to read.
3411 * @size: Size of data to read, 1=byte 2=word
3412 * @data: Pointer to the word to store the value read.
3414 * Reads a byte or word from the NVM using the flash access registers.
/* NOTE(review): line-numbered extract; the do { opener of the retry loop
 * and some braces/breaks are on dropped lines.  Code kept byte-identical.
 * Visible behavior: validate size/offset, compute the linear flash
 * address, then retry up to ICH_FLASH_CYCLE_REPEAT_COUNT times: init the
 * controller, program byte-count and READ cycle in HSFCTL, set FADDR,
 * run the cycle, and on success extract 1 or 2 bytes from FDATA0;
 * on FLCERR retry, on missing FLCDONE report a timeout.
 */
3416 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3419 union ich8_hws_flash_status hsfsts;
3420 union ich8_hws_flash_ctrl hsflctl;
3421 u32 flash_linear_addr;
3423 s32 ret_val = -E1000_ERR_NVM;
3426 DEBUGFUNC("e1000_read_flash_data_ich8lan");
3428 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3429 return -E1000_ERR_NVM;
3431 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3432 hw->nvm.flash_base_addr);
3437 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3438 if (ret_val != E1000_SUCCESS)
3441 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3442 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3443 hsflctl.hsf_ctrl.fldbcount = size - 1;
3444 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3445 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3447 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3450 e1000_flash_cycle_ich8lan(hw,
3451 ICH_FLASH_READ_COMMAND_TIMEOUT);
3453 /* Check if FCERR is set to 1, if set to 1, clear it
3454 * and try the whole sequence a few more times, else
3455 * read in (shift in) the Flash Data0, the order is
3456 * least significant byte first msb to lsb
3458 if (ret_val == E1000_SUCCESS) {
3459 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3461 *data = (u8)(flash_data & 0x000000FF);
3463 *data = (u16)(flash_data & 0x0000FFFF);
3466 /* If we've gotten here, then things are probably
3467 * completely hosed, but if the error condition is
3468 * detected, it won't hurt to give it another try...
3469 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3471 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3473 if (hsfsts.hsf_status.flcerr) {
3474 /* Repeat for some time before giving up. */
3476 } else if (!hsfsts.hsf_status.flcdone) {
3477 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3481 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3487 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
3488 * @hw: pointer to the HW structure
3489 * @offset: The offset (in bytes) of the word(s) to write.
3490 * @words: Size of data to write in words
3491 * @data: Pointer to the word(s) to write at offset.
3493 * Writes a byte or word to the NVM using the flash access registers.
/* NOTE(review): line-numbered extract; the words==0 clause of the bounds
 * check and a brace are on dropped lines.  Code kept byte-identical.
 * Writes land only in the in-memory shadow RAM (marked modified); the
 * actual flash commit happens in e1000_update_nvm_checksum_ich8lan.
 */
3495 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3498 struct e1000_nvm_info *nvm = &hw->nvm;
3499 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3502 DEBUGFUNC("e1000_write_nvm_ich8lan");
3504 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3506 DEBUGOUT("nvm parameter(s) out of bounds\n");
3507 return -E1000_ERR_NVM;
3510 nvm->ops.acquire(hw);
3512 for (i = 0; i < words; i++) {
3513 dev_spec->shadow_ram[offset+i].modified = true;
3514 dev_spec->shadow_ram[offset+i].value = data[i];
3517 nvm->ops.release(hw);
3519 return E1000_SUCCESS;
3523 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3524 * @hw: pointer to the HW structure
3526 * The NVM checksum is updated by calling the generic update_nvm_checksum,
3527 * which writes the checksum to the shadow ram. The changes in the shadow
3528 * ram are then committed to the EEPROM by processing each bank at a time
3529 * checking for the modified bit and writing only the pending changes.
3530 * After a successful commit, the shadow ram is cleared and is ready for
/* NOTE(review): line-numbered extract; error-check returns/gotos, the
 * bank if/else header, high/low-byte split of each word write, and the
 * out label are on dropped lines.  Code kept byte-identical.
 * Visible flow: refresh checksum in shadow RAM; detect active bank;
 * erase the opposite bank; copy each word (modified value or old-bank
 * value, with the 0x13 signature word forced to 11b during the copy)
 * byte-by-byte via retry writes; then validate the new bank's signature
 * (write 10b) and invalidate the old one; clear the shadow RAM cache and
 * reload the EEPROM.
 */
3533 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3535 struct e1000_nvm_info *nvm = &hw->nvm;
3536 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3537 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3541 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3543 ret_val = e1000_update_nvm_checksum_generic(hw);
3547 if (nvm->type != e1000_nvm_flash_sw)
3550 nvm->ops.acquire(hw);
3552 /* We're writing to the opposite bank so if we're on bank 1,
3553 * write to bank 0 etc. We also need to erase the segment that
3554 * is going to be written
3556 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3557 if (ret_val != E1000_SUCCESS) {
3558 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3563 new_bank_offset = nvm->flash_bank_size;
3564 old_bank_offset = 0;
3565 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3569 old_bank_offset = nvm->flash_bank_size;
3570 new_bank_offset = 0;
3571 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3576 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3577 /* Determine whether to write the value stored
3578 * in the other NVM bank or a modified value stored
3581 if (dev_spec->shadow_ram[i].modified) {
3582 data = dev_spec->shadow_ram[i].value;
3584 ret_val = e1000_read_flash_word_ich8lan(hw, i +
3591 /* If the word is 0x13, then make sure the signature bits
3592 * (15:14) are 11b until the commit has completed.
3593 * This will allow us to write 10b which indicates the
3594 * signature is valid. We want to do this after the write
3595 * has completed so that we don't mark the segment valid
3596 * while the write is still in progress
3598 if (i == E1000_ICH_NVM_SIG_WORD)
3599 data |= E1000_ICH_NVM_SIG_MASK;
3601 /* Convert offset to bytes. */
3602 act_offset = (i + new_bank_offset) << 1;
3605 /* Write the bytes to the new bank. */
3606 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3613 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3620 /* Don't bother writing the segment valid bits if sector
3621 * programming failed.
3624 DEBUGOUT("Flash commit failed.\n");
3628 /* Finally validate the new segment by setting bit 15:14
3629 * to 10b in word 0x13 , this can be done without an
3630 * erase as well since these bits are 11 to start with
3631 * and we need to change bit 14 to 0b
3633 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3634 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3639 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3645 /* And invalidate the previously valid segment by setting
3646 * its signature word (0x13) high_byte to 0b. This can be
3647 * done without an erase because flash erase sets all bits
3648 * to 1's. We can write 1's to 0's without an erase
3650 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3651 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3655 /* Great! Everything worked, we can now clear the cached entries. */
3656 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3657 dev_spec->shadow_ram[i].modified = false;
3658 dev_spec->shadow_ram[i].value = 0xFFFF;
3662 nvm->ops.release(hw);
3664 /* Reload the EEPROM, or else modifications will not appear
3665 * until after the next adapter reset.
3668 nvm->ops.reload(hw);
3674 DEBUGOUT1("NVM update error: %d\n", ret_val);
/* NOTE(review): this extract is line-sampled (embedded numbering skips, e.g.
 * 3681->3683); braces/labels between those lines are not visible here.
 * Code below is kept byte-identical; only comments are added/fixed. */
3680 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3681 * @hw: pointer to the HW structure
3683 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3684 * If the bit is 0, the EEPROM had been modified, but the checksum was not
3685 * calculated, in which case we need to calculate the checksum and set bit 6.
3687 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3692 u16 valid_csum_mask;
3694 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3696 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
3697 * the checksum needs to be fixed. This bit is an indication that
3698 * the NVM was prepared by OEM software and did not calculate
3699 * the checksum...a likely scenario.
/* The word/mask pair selected below depends on mac.type; the case labels
 * between these lines were dropped by the extraction. */
3701 switch (hw->mac.type) {
3704 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3707 word = NVM_FUTURE_INIT_WORD1;
3708 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3712 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
/* If the valid-checksum bit is clear, set it, write the word back and
 * commit via update so the generic validation below can succeed. */
3716 if (!(data & valid_csum_mask)) {
3717 data |= valid_csum_mask;
3718 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3721 ret_val = hw->nvm.ops.update(hw);
3726 return e1000_validate_nvm_checksum_generic(hw);
/* NOTE(review): line-sampled extract; intermediate lines (error returns,
 * loop opener for the do/while at 3801) are missing. Code kept verbatim. */
3730 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3731 * @hw: pointer to the HW structure
3732 * @offset: The offset (in bytes) of the byte/word to write.
3733 * @size: Size of data to write, 1=byte 2=word
3734 * @data: The byte(s) to write to the NVM.
3736 * Writes one/two bytes to the NVM using the flash access registers.
3738 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3741 union ich8_hws_flash_status hsfsts;
3742 union ich8_hws_flash_ctrl hsflctl;
3743 u32 flash_linear_addr;
/* NOTE(review): DEBUGFUNC string differs from the function name; harmless,
 * but cannot be changed in a comment-only pass (it is a runtime string). */
3748 DEBUGFUNC("e1000_write_ich8_data");
/* Reject sizes other than 1-2 bytes, data wider than the size allows,
 * and offsets outside the linear-address window. */
3750 if (size < 1 || size > 2 || data > size * 0xff ||
3751 offset > ICH_FLASH_LINEAR_ADDR_MASK)
3752 return -E1000_ERR_NVM;
3754 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3755 hw->nvm.flash_base_addr)
3760 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3761 if (ret_val != E1000_SUCCESS)
3764 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3765 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3766 hsflctl.hsf_ctrl.fldbcount = size - 1;
3767 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3768 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3770 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3773 flash_data = (u32)data & 0x00FF;
3775 flash_data = (u32)data;
3777 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3779 /* check if FCERR is set to 1 , if set to 1, clear it
3780 * and try the whole sequence a few more times else done
3783 e1000_flash_cycle_ich8lan(hw,
3784 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3785 if (ret_val == E1000_SUCCESS)
3788 /* If we're here, then things are most likely
3789 * completely hosed, but if the error condition
3790 * is detected, it won't hurt to give it another
3791 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3793 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3794 if (hsfsts.hsf_status.flcerr)
3795 /* Repeat for some time before giving up. */
3797 if (!hsfsts.hsf_status.flcdone) {
3798 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3801 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3807 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3808 * @hw: pointer to the HW structure
3809 * @offset: The index of the byte to write.
3810 * @data: The byte to write to the NVM.
3812 * Writes a single byte to the NVM using the flash access registers.
/* Thin wrapper: widens the byte to u16 and delegates to the 1-byte path
 * of e1000_write_flash_data_ich8lan(). */
3814 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3817 u16 word = (u16)data;
3819 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3821 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
/* NOTE(review): line-sampled extract; the early-success return after the
 * first write and the loop's break on success are among the dropped lines. */
3825 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3826 * @hw: pointer to the HW structure
3827 * @offset: The offset of the byte to write.
3828 * @byte: The byte to write to the NVM.
3830 * Writes a single byte to the NVM using the flash access registers.
3831 * Goes through a retry algorithm before giving up.
3833 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3834 u32 offset, u8 byte)
3837 u16 program_retries;
3839 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3841 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
/* Retry up to 100 times; presumably each failed attempt is preceded by a
 * flash-recovery step in the dropped lines -- TODO confirm vs upstream. */
3845 for (program_retries = 0; program_retries < 100; program_retries++) {
3846 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3848 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3849 if (ret_val == E1000_SUCCESS)
/* Loop exhausted without success -> give up with an NVM error. */
3852 if (program_retries == 100)
3853 return -E1000_ERR_NVM;
3855 return E1000_SUCCESS;
/* NOTE(review): line-sampled extract; switch case labels, `iteration`
 * assignments for the 4K/8K/64K cases, and the do-loop opener are missing. */
3859 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3860 * @hw: pointer to the HW structure
3861 * @bank: 0 for first bank, 1 for second bank, etc.
3863 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3864 * bank N is 4096 * N + flash_reg_addr.
3866 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3868 struct e1000_nvm_info *nvm = &hw->nvm;
3869 union ich8_hws_flash_status hsfsts;
3870 union ich8_hws_flash_ctrl hsflctl;
3871 u32 flash_linear_addr;
3872 /* bank size is in 16bit words - adjust to bytes */
3873 u32 flash_bank_size = nvm->flash_bank_size * 2;
3876 s32 j, iteration, sector_size;
3878 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3880 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3882 /* Determine HW Sector size: Read BERASE bits of hw flash status
3884 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3885 * consecutive sectors. The start index for the nth Hw sector
3886 * can be calculated as = bank * 4096 + n * 256
3887 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3888 * The start index for the nth Hw sector can be calculated
3890 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3891 * (ich9 only, otherwise error condition)
3892 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3894 switch (hsfsts.hsf_status.berasesz) {
3896 /* Hw sector size 256 */
3897 sector_size = ICH_FLASH_SEG_SIZE_256;
3898 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3901 sector_size = ICH_FLASH_SEG_SIZE_4K;
3905 sector_size = ICH_FLASH_SEG_SIZE_8K;
3909 sector_size = ICH_FLASH_SEG_SIZE_64K;
/* Unknown BERASE encoding -> refuse to erase. */
3913 return -E1000_ERR_NVM;
3916 /* Start with the base address, then add the sector offset. */
3917 flash_linear_addr = hw->nvm.flash_base_addr;
3918 flash_linear_addr += (bank) ? flash_bank_size : 0;
3920 for (j = 0; j < iteration; j++) {
3922 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3925 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3929 /* Write a value 11 (block Erase) in Flash
3930 * Cycle field in hw flash control
3932 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3934 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3935 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3938 /* Write the last 24 bits of an index within the
3939 * block into Flash Linear address field in Flash
/* NOTE(review): `+=` accumulates j*sector_size into flash_linear_addr on
 * every pass (and on retries if this sits inside the do/while); verify
 * the addressing against upstream ich8lan.c / the ICH datasheet. */
3942 flash_linear_addr += (j * sector_size);
3943 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3946 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3947 if (ret_val == E1000_SUCCESS)
3950 /* Check if FCERR is set to 1. If 1,
3951 * clear it and try the whole sequence
3952 * a few more times else Done
3954 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3956 if (hsfsts.hsf_status.flcerr)
3957 /* repeat for some time before giving up */
3959 else if (!hsfsts.hsf_status.flcdone)
3961 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3964 return E1000_SUCCESS;
3968 * e1000_valid_led_default_ich8lan - Set the default LED settings
3969 * @hw: pointer to the HW structure
3970 * @data: Pointer to the LED settings
3972 * Reads the LED default settings from the NVM to data. If the NVM LED
3973 * settings is all 0's or F's, set the LED default to a valid LED default
3976 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3980 DEBUGFUNC("e1000_valid_led_default_ich8lan");
3982 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3984 DEBUGOUT("NVM Read Error\n");
/* All-zeros / all-ones are reserved (unprogrammed) values; substitute
 * the platform default. */
3988 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3989 *data = ID_LED_DEFAULT_ICH8LAN;
3991 return E1000_SUCCESS;
/* NOTE(review): line-sampled extract; the `shift` assignment, both switch
 * openers, several case labels and the `break`s are among the dropped lines. */
3995 * e1000_id_led_init_pchlan - store LED configurations
3996 * @hw: pointer to the HW structure
3998 * PCH does not control LEDs via the LEDCTL register, rather it uses
3999 * the PHY LED configuration register.
4001 * PCH also does not have an "always on" or "always off" mode which
4002 * complicates the ID feature. Instead of using the "on" mode to indicate
4003 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4004 * use "link_up" mode. The LEDs will still ID on request if there is no
4005 * link based on logic in e1000_led_[on|off]_pchlan().
4007 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4009 struct e1000_mac_info *mac = &hw->mac;
/* "off" is expressed as link_up mode with the invert bit set, since PCH
 * LEDs have no true always-off mode (see header comment). */
4011 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4012 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4013 u16 data, i, temp, shift;
4015 DEBUGFUNC("e1000_id_led_init_pchlan");
4017 /* Get default ID LED modes */
4018 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4022 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4023 mac->ledctl_mode1 = mac->ledctl_default;
4024 mac->ledctl_mode2 = mac->ledctl_default;
/* Each LED's 4-bit mode nibble is extracted in turn (i << 2). */
4026 for (i = 0; i < 4; i++) {
4027 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4030 case ID_LED_ON1_DEF2:
4031 case ID_LED_ON1_ON2:
4032 case ID_LED_ON1_OFF2:
4033 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4034 mac->ledctl_mode1 |= (ledctl_on << shift);
4036 case ID_LED_OFF1_DEF2:
4037 case ID_LED_OFF1_ON2:
4038 case ID_LED_OFF1_OFF2:
4039 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4040 mac->ledctl_mode1 |= (ledctl_off << shift);
4047 case ID_LED_DEF1_ON2:
4048 case ID_LED_ON1_ON2:
4049 case ID_LED_OFF1_ON2:
4050 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4051 mac->ledctl_mode2 |= (ledctl_on << shift);
4053 case ID_LED_DEF1_OFF2:
4054 case ID_LED_ON1_OFF2:
4055 case ID_LED_OFF1_OFF2:
4056 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4057 mac->ledctl_mode2 |= (ledctl_off << shift);
4065 return E1000_SUCCESS;
4069 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4070 * @hw: pointer to the HW structure
4072 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4073 * register, so the bus width is hard coded.
4075 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4077 struct e1000_bus_info *bus = &hw->bus;
4080 DEBUGFUNC("e1000_get_bus_info_ich8lan");
4082 ret_val = e1000_get_bus_info_pcie_generic(hw);
4084 /* ICH devices are "PCI Express"-ish. They have
4085 * a configuration space, but do not contain
4086 * PCI Express Capability registers, so bus width
4087 * must be hardcoded.
4089 if (bus->width == e1000_bus_width_unknown)
4090 bus->width = e1000_bus_width_pcie_x1;
/* NOTE(review): line-sampled extract; local declarations (ctrl/reg/kum_cfg),
 * several error-check returns and delay calls are among the dropped lines.
 * Code kept byte-identical; comments only. */
4096 * e1000_reset_hw_ich8lan - Reset the hardware
4097 * @hw: pointer to the HW structure
4099 * Does a full reset of the hardware which includes a reset of the PHY and
4102 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4104 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4109 DEBUGFUNC("e1000_reset_hw_ich8lan");
4111 /* Prevent the PCI-E bus from sticking if there is no TLP connection
4112 * on the last TLP read/write transaction when MAC is reset.
4114 ret_val = e1000_disable_pcie_master_generic(hw);
4116 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4118 DEBUGOUT("Masking off all interrupts\n");
4119 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4121 /* Disable the Transmit and Receive units. Then delay to allow
4122 * any pending transactions to complete before we hit the MAC
4123 * with the global reset.
4125 E1000_WRITE_REG(hw, E1000_RCTL, 0);
4126 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4127 E1000_WRITE_FLUSH(hw);
4131 /* Workaround for ICH8 bit corruption issue in FIFO memory */
4132 if (hw->mac.type == e1000_ich8lan) {
4133 /* Set Tx and Rx buffer allocation to 8k apiece. */
4134 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4135 /* Set Packet Buffer Size to 16k. */
4136 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4139 if (hw->mac.type == e1000_pchlan) {
4140 /* Save the NVM K1 bit setting*/
4141 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4145 if (kum_cfg & E1000_NVM_K1_ENABLE)
4146 dev_spec->nvm_k1_enabled = true;
4148 dev_spec->nvm_k1_enabled = false;
4151 ctrl = E1000_READ_REG(hw, E1000_CTRL);
/* Only combine PHY reset with the global reset when firmware does not
 * block PHY resets. */
4153 if (!hw->phy.ops.check_reset_block(hw)) {
4154 /* Full-chip reset requires MAC and PHY reset at the same
4155 * time to make sure the interface between MAC and the
4156 * external PHY is reset.
4158 ctrl |= E1000_CTRL_PHY_RST;
4160 /* Gate automatic PHY configuration by hardware on
4163 if ((hw->mac.type == e1000_pch2lan) &&
4164 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4165 e1000_gate_hw_phy_config_ich8lan(hw, true);
4167 ret_val = e1000_acquire_swflag_ich8lan(hw);
4168 DEBUGOUT("Issuing a global reset to ich8lan\n");
4169 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4170 /* cannot issue a flush here because it hangs the hardware */
4173 /* Set Phy Config Counter to 50msec */
4174 if (hw->mac.type == e1000_pch2lan) {
4175 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4176 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4177 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4178 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4182 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
/* If a PHY reset was part of the global reset, wait for PHY config to
 * finish and run the post-reset workarounds. */
4184 if (ctrl & E1000_CTRL_PHY_RST) {
4185 ret_val = hw->phy.ops.get_cfg_done(hw);
4189 ret_val = e1000_post_phy_reset_ich8lan(hw);
4194 /* For PCH, this write will make sure that any noise
4195 * will be detected as a CRC error and be dropped rather than show up
4196 * as a bad packet to the DMA engine.
4198 if (hw->mac.type == e1000_pchlan)
4199 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
/* Mask and clear any pending interrupts left over from the reset. */
4201 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4202 E1000_READ_REG(hw, E1000_ICR);
4204 reg = E1000_READ_REG(hw, E1000_KABGTXD);
4205 reg |= E1000_KABGTXD_BGSQLBIAS;
4206 E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4208 return E1000_SUCCESS;
/* NOTE(review): line-sampled extract; declarations (ret_val, i), some error
 * checks and the final return are among the dropped lines. Code verbatim. */
4212 * e1000_init_hw_ich8lan - Initialize the hardware
4213 * @hw: pointer to the HW structure
4215 * Prepares the hardware for transmit and receive by doing the following:
4216 * - initialize hardware bits
4217 * - initialize LED identification
4218 * - setup receive address registers
4219 * - setup flow control
4220 * - setup transmit descriptors
4221 * - clear statistics
4223 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4225 struct e1000_mac_info *mac = &hw->mac;
4226 u32 ctrl_ext, txdctl, snoop;
4230 DEBUGFUNC("e1000_init_hw_ich8lan");
4232 e1000_initialize_hw_bits_ich8lan(hw);
4234 /* Initialize identification LED */
4235 ret_val = mac->ops.id_led_init(hw);
4236 /* An error is not fatal and we should not stop init due to this */
4238 DEBUGOUT("Error initializing identification LED\n");
4240 /* Setup the receive address. */
4241 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4243 /* Zero out the Multicast HASH table */
4244 DEBUGOUT("Zeroing the MTA\n");
4245 for (i = 0; i < mac->mta_reg_count; i++)
4246 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4248 /* The 82578 Rx buffer will stall if wakeup is enabled in host and
4249 * the ME. Disable wakeup by clearing the host wakeup bit.
4250 * Reset the phy after disabling host wakeup to reset the Rx buffer.
4252 if (hw->phy.type == e1000_phy_82578) {
4253 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4254 i &= ~BM_WUC_HOST_WU_BIT;
4255 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4256 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4261 /* Setup link and flow control */
4262 ret_val = mac->ops.setup_link(hw);
4264 /* Set the transmit descriptor write-back policy for both queues */
4265 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4266 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4267 E1000_TXDCTL_FULL_TX_DESC_WB);
4268 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4269 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4270 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4271 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4272 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4273 E1000_TXDCTL_FULL_TX_DESC_WB);
4274 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4275 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4276 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4278 /* ICH8 has opposite polarity of no_snoop bits.
4279 * By default, we should use snoop behavior.
4281 if (mac->type == e1000_ich8lan)
4282 snoop = PCIE_ICH8_SNOOP_ALL;
4284 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4285 e1000_set_pcie_no_snoop_generic(hw, snoop);
4287 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4288 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4289 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4291 /* Clear all of the statistics registers (clear on read). It is
4292 * important that we do this after we have tried to establish link
4293 * because the symbol error count will increment wildly if there
4296 e1000_clear_hw_cntrs_ich8lan(hw);
/* NOTE(review): line-sampled extract; the per-register bit values set on
 * TXDCTL/TARC/STATUS between some reads and writes are missing here. */
4302 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4303 * @hw: pointer to the HW structure
4305 * Sets/Clears required hardware bits necessary for correctly setting up the
4306 * hardware for transmit and receive.
4308 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4312 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4314 /* Extended Device Control */
4315 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4317 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4318 if (hw->mac.type >= e1000_pchlan)
4319 reg |= E1000_CTRL_EXT_PHYPDEN;
4320 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4322 /* Transmit Descriptor Control 0 */
4323 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4325 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4327 /* Transmit Descriptor Control 1 */
4328 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4330 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4332 /* Transmit Arbitration Control 0 */
4333 reg = E1000_READ_REG(hw, E1000_TARC(0));
4334 if (hw->mac.type == e1000_ich8lan)
4335 reg |= (1 << 28) | (1 << 29);
4336 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4337 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4339 /* Transmit Arbitration Control 1 */
4340 reg = E1000_READ_REG(hw, E1000_TARC(1));
/* Multiple-request support in TCTL influences which TARC1 bits apply;
 * the branch bodies were dropped by the extraction. */
4341 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4345 reg |= (1 << 24) | (1 << 26) | (1 << 30);
4346 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4349 if (hw->mac.type == e1000_ich8lan) {
4350 reg = E1000_READ_REG(hw, E1000_STATUS);
4352 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4355 /* work-around descriptor data corruption issue during nfs v2 udp
4356 * traffic, just disable the nfs filtering capability
4358 reg = E1000_READ_REG(hw, E1000_RFCTL);
4359 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4361 /* Disable IPv6 extension header parsing because some malformed
4362 * IPv6 headers can hang the Rx.
4364 if (hw->mac.type == e1000_ich8lan)
4365 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4366 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4368 /* Enable ECC on Lynxpoint */
4369 if (hw->mac.type == e1000_pch_lpt) {
4370 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4371 reg |= E1000_PBECCSTS_ECC_ENABLE;
4372 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4374 reg = E1000_READ_REG(hw, E1000_CTRL);
4375 reg |= E1000_CTRL_MEHE;
4376 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4383 * e1000_setup_link_ich8lan - Setup flow control and link settings
4384 * @hw: pointer to the HW structure
4386 * Determines which flow control settings to use, then configures flow
4387 * control. Calls the appropriate media-specific link configuration
4388 * function. Assuming the adapter has a valid link partner, a valid link
4389 * should be established. Assumes the hardware has previously been reset
4390 * and the transmitter and receiver are not enabled.
4392 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4396 DEBUGFUNC("e1000_setup_link_ich8lan");
/* Firmware blocks PHY resets -> leave link setup alone entirely. */
4398 if (hw->phy.ops.check_reset_block(hw))
4399 return E1000_SUCCESS;
4401 /* ICH parts do not have a word in the NVM to determine
4402 * the default flow control setting, so we explicitly
4405 if (hw->fc.requested_mode == e1000_fc_default)
4406 hw->fc.requested_mode = e1000_fc_full;
4408 /* Save off the requested flow control mode for use later. Depending
4409 * on the link partner's capabilities, we may or may not use this mode.
4411 hw->fc.current_mode = hw->fc.requested_mode;
4413 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4414 hw->fc.current_mode);
4416 /* Continue to configure the copper link. */
4417 ret_val = hw->mac.ops.setup_physical_interface(hw);
4421 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
/* PCH-class PHYs additionally take the flow-control refresh time and a
 * BM port-control write (value lines dropped by the extraction). */
4422 if ((hw->phy.type == e1000_phy_82578) ||
4423 (hw->phy.type == e1000_phy_82579) ||
4424 (hw->phy.type == e1000_phy_i217) ||
4425 (hw->phy.type == e1000_phy_82577)) {
4426 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4428 ret_val = hw->phy.ops.write_reg(hw,
4429 PHY_REG(BM_PORT_CTRL_PAGE, 27),
4435 return e1000_set_fc_watermarks_generic(hw);
/* NOTE(review): line-sampled extract; switch case labels (e1000_phy_ife for
 * the MDIX block, default), break statements and error checks are missing. */
4439 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4440 * @hw: pointer to the HW structure
4442 * Configures the kumeran interface to the PHY to wait the appropriate time
4443 * when polling the PHY, then call the generic setup_copper_link to finish
4444 * configuring the copper link.
4446 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4452 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
/* Force link up and clear forced speed/duplex so autoneg drives them. */
4454 ctrl = E1000_READ_REG(hw, E1000_CTRL);
4455 ctrl |= E1000_CTRL_SLU;
4456 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4457 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4459 /* Set the mac to wait the maximum time between each iteration
4460 * and increase the max iterations when polling the phy;
4461 * this fixes erroneous timeouts at 10Mbps.
4463 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4467 ret_val = e1000_read_kmrn_reg_generic(hw,
4468 E1000_KMRNCTRLSTA_INBAND_PARAM,
4473 ret_val = e1000_write_kmrn_reg_generic(hw,
4474 E1000_KMRNCTRLSTA_INBAND_PARAM,
/* Per-PHY setup; the IFE branch below configures MDI/MDIX by hand. */
4479 switch (hw->phy.type) {
4480 case e1000_phy_igp_3:
4481 ret_val = e1000_copper_link_setup_igp(hw);
4486 case e1000_phy_82578:
4487 ret_val = e1000_copper_link_setup_m88(hw);
4491 case e1000_phy_82577:
4492 case e1000_phy_82579:
4493 ret_val = e1000_copper_link_setup_82577(hw);
4498 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4503 reg_data &= ~IFE_PMC_AUTO_MDIX;
4505 switch (hw->phy.mdix) {
4507 reg_data &= ~IFE_PMC_FORCE_MDIX;
4510 reg_data |= IFE_PMC_FORCE_MDIX;
4514 reg_data |= IFE_PMC_AUTO_MDIX;
4517 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4526 return e1000_setup_copper_link_generic(hw);
4530 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4531 * @hw: pointer to the HW structure
4533 * Calls the PHY specific link setup function and then calls the
4534 * generic setup_copper_link to finish configuring the link for
4535 * Lynxpoint PCH devices
4537 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4542 DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
/* Same CTRL prep as the ich8lan variant: set link up, clear forced
 * speed/duplex, then run the 82577-family PHY setup. */
4544 ctrl = E1000_READ_REG(hw, E1000_CTRL);
4545 ctrl |= E1000_CTRL_SLU;
4546 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4547 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4549 ret_val = e1000_copper_link_setup_82577(hw);
4553 return e1000_setup_copper_link_generic(hw);
4557 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4558 * @hw: pointer to the HW structure
4559 * @speed: pointer to store current link speed
4560 * @duplex: pointer to store the current link duplex
4562 * Calls the generic get_speed_and_duplex to retrieve the current link
4563 * information and then calls the Kumeran lock loss workaround for links at
4566 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4571 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4573 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
/* Kumeran lock-loss workaround applies only to ICH8 + IGP3 at 1 Gb/s. */
4577 if ((hw->mac.type == e1000_ich8lan) &&
4578 (hw->phy.type == e1000_phy_igp_3) &&
4579 (*speed == SPEED_1000)) {
4580 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
/* NOTE(review): line-sampled extract; step 4 of the algorithm comment, local
 * declarations and usec_delay calls are among the dropped lines. */
4587 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4588 * @hw: pointer to the HW structure
4590 * Work-around for 82566 Kumeran PCS lock loss:
4591 * On link status change (i.e. PCI reset, speed change) and link is up and
4593 * 0) if workaround is optionally disabled do nothing
4594 * 1) wait 1ms for Kumeran link to come up
4595 * 2) check Kumeran Diagnostic register PCS lock loss bit
4596 * 3) if not set the link is locked (all is good), otherwise...
4598 * 5) repeat up to 10 times
4599 * Note: this is only called for IGP3 copper when speed is 1gb.
4601 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4603 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4609 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4611 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4612 return E1000_SUCCESS;
4614 /* Make sure link is up before proceeding. If not just return.
4615 * Attempting this while link is negotiating fouled up link
4618 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4620 return E1000_SUCCESS;
4622 for (i = 0; i < 10; i++) {
4623 /* read once to clear */
4624 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4627 /* and again to get new status */
4628 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4632 /* check for PCS lock */
4633 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4634 return E1000_SUCCESS;
4636 /* Issue PHY reset */
4637 hw->phy.ops.reset(hw);
/* 10 resets did not restore PCS lock: disable GbE so the link falls
 * back to a lower speed rather than flapping at 1 Gb/s. */
4640 /* Disable GigE link negotiation */
4641 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4642 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4643 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4644 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4646 /* Call gig speed drop workaround on Gig disable before accessing
4649 e1000_gig_downshift_workaround_ich8lan(hw);
4651 /* unable to acquire PCS lock */
4652 return -E1000_ERR_PHY;
4656 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4657 * @hw: pointer to the HW structure
4658 * @state: boolean value used to set the current Kumeran workaround state
4660 * If ICH8, set the current Kumeran workaround state (enabled - true
4661 * /disabled - false).
4663 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4666 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4668 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
/* The lock-loss erratum exists only on ICH8 parts; refuse elsewhere. */
4670 if (hw->mac.type != e1000_ich8lan) {
4671 DEBUGOUT("Workaround applies to ICH8 only.\n");
4675 dev_spec->kmrn_lock_loss_workaround_enabled = state;
4681 * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4682 * @hw: pointer to the HW structure
4684 * Workaround for 82566 power-down on D3 entry:
4685 * 1) disable gigabit link
4686 * 2) write VR power-down enable
4688 * Continue if successful, else issue LCD reset and repeat
4690 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4696 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
/* Workaround is specific to the IGP3 PHY (82566). */
4698 if (hw->phy.type != e1000_phy_igp_3)
4701 /* Try the workaround twice (if needed) */
4704 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4705 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4706 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4707 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4709 /* Call gig speed drop workaround on Gig disable before
4710 * accessing any PHY registers
4712 if (hw->mac.type == e1000_ich8lan)
4713 e1000_gig_downshift_workaround_ich8lan(hw);
4715 /* Write VR power-down enable */
4716 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4717 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4718 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4719 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4721 /* Read it back and test */
4722 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4723 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
/* Success, or second attempt already made -> stop retrying. */
4724 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4727 /* Issue PHY reset and repeat at most one more time */
4728 reg = E1000_READ_REG(hw, E1000_CTRL);
4729 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4735 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4736 * @hw: pointer to the HW structure
4738 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
4739 * LPLU, Gig disable, MDIC PHY reset):
4740 * 1) Set Kumeran Near-end loopback
4741 * 2) Clear Kumeran Near-end loopback
4742 * Should only be called for ICH8[m] devices with any 1G Phy.
4744 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4749 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
/* Only ICH8 with a gigabit-capable PHY (i.e. not IFE 10/100) needs this. */
4751 if ((hw->mac.type != e1000_ich8lan) ||
4752 (hw->phy.type == e1000_phy_ife))
/* Pulse Kumeran near-end loopback: set the bit, write it, then clear it. */
4755 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4759 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4760 ret_val = e1000_write_kmrn_reg_generic(hw,
4761 E1000_KMRNCTRLSTA_DIAG_OFFSET,
4765 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4766 e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
/* NOTE(review): line-sampled extract; several closing braces, error checks,
 * and register-name arguments to read/write_reg_locked are missing here.
 * Code kept byte-identical; comments only. */
4771 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4772 * @hw: pointer to the HW structure
4774 * During S0 to Sx transition, it is possible the link remains at gig
4775 * instead of negotiating to a lower speed. Before going to Sx, set
4776 * 'Gig Disable' to force link speed negotiation to a lower speed based on
4777 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
4778 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4779 * needs to be written.
4780 * Parts that support (and are linked to a partner which support) EEE in
4781 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4782 * than 10Mbps w/o EEE.
4784 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4786 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4790 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4792 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4793 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4795 if (hw->phy.type == e1000_phy_i217) {
4796 u16 phy_reg, device_id = hw->device_id;
/* I218 parts must drop the PLL clock request before entering Sx. */
4798 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4799 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
4800 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4802 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4803 fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4806 ret_val = hw->phy.ops.acquire(hw);
4810 if (!dev_spec->eee_disable) {
4814 e1000_read_emi_reg_locked(hw,
4815 I217_EEE_ADVERTISEMENT,
4820 /* Disable LPLU if both link partners support 100BaseT
4821 * EEE and 100Full is advertised on both ends of the
4822 * link, and enable Auto Enable LPI since there will
4823 * be no driver to enable LPI while in Sx.
4825 if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4826 (dev_spec->eee_lp_ability &
4827 I82579_EEE_100_SUPPORTED) &&
4828 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4829 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4830 E1000_PHY_CTRL_NOND0A_LPLU);
4832 /* Set Auto Enable LPI after link up */
4833 hw->phy.ops.read_reg_locked(hw,
4836 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4837 hw->phy.ops.write_reg_locked(hw,
4843 /* For i217 Intel Rapid Start Technology support,
4844 * when the system is going into Sx and no manageability engine
4845 * is present, the driver must configure proxy to reset only on
4846 * power good. LPI (Low Power Idle) state must also reset only
4847 * on power good, as well as the MTA (Multicast table array).
4848 * The SMBus release must also be disabled on LCD reset.
4850 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4851 E1000_ICH_FWSM_FW_VALID)) {
4852 /* Enable proxy to reset only on power good. */
4853 hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4855 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4856 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4859 /* Set bit enable LPI (EEE) to reset only on
4862 hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4863 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4864 hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4866 /* Disable the SMB release on LCD reset. */
4867 hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4868 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4869 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4872 /* Enable MTA to reset for Intel Rapid Start Technology
4875 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4876 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4877 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4880 hw->phy.ops.release(hw);
/* Commit the accumulated PHY_CTRL bits (GbE disable, LPLU tweaks). */
4883 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4885 if (hw->mac.type == e1000_ich8lan)
4886 e1000_gig_downshift_workaround_ich8lan(hw);
4888 if (hw->mac.type >= e1000_pchlan) {
4889 e1000_oem_bits_config_ich8lan(hw, false);
4891 /* Reset PHY to activate OEM bits on 82577/8 */
4892 if (hw->mac.type == e1000_pchlan)
4893 e1000_phy_hw_reset_generic(hw);
4895 ret_val = hw->phy.ops.acquire(hw);
4898 e1000_write_smbus_addr(hw);
4899 hw->phy.ops.release(hw);
/**
 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
 *  @hw: pointer to the HW structure
 *
 *  During Sx to S0 transitions on non-managed devices or managed devices
 *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
 *  the PHY.
 *  On i217, setup Intel Rapid Start Technology.
 **/
void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
	DEBUGFUNC("e1000_resume_workarounds_pchlan");

	/* These workarounds only apply to PCH2 and newer parts */
	if (hw->mac.type < e1000_pch2lan)

	/* May toggle LANPHYPC if the PHY registers are inaccessible */
	ret_val = e1000_init_phy_workarounds_pchlan(hw);
		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);

	/* For i217 Intel Rapid Start Technology support when the system
	 * is transitioning from Sx and no manageability engine is present
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
	if (hw->phy.type == e1000_phy_i217) {

		ret_val = hw->phy.ops.acquire(hw);
			DEBUGOUT("Failed to setup iRST\n");

		/* Clear Auto Enable LPI after link up */
		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);

		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Restore clear on SMB if no manageability engine
			 * is present
			 */
			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);

			/* Disable the iRST proxy */
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);

		/* Enable reset on MTA */
		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);

		DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
		hw->phy.ops.release(hw);
/**
 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/
STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
	DEBUGFUNC("e1000_cleanup_led_ich8lan");

	/* IFE PHYs drive the LEDs through a PHY register; all other parts
	 * restore the default through the MAC LEDCTL register.
	 */
	if (hw->phy.type == e1000_phy_ife)
		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
	return E1000_SUCCESS;
4997 * e1000_led_on_ich8lan - Turn LEDs on
4998 * @hw: pointer to the HW structure
5002 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5004 DEBUGFUNC("e1000_led_on_ich8lan");
5006 if (hw->phy.type == e1000_phy_ife)
5007 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5008 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5010 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5011 return E1000_SUCCESS;
5015 * e1000_led_off_ich8lan - Turn LEDs off
5016 * @hw: pointer to the HW structure
5018 * Turn off the LEDs.
5020 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5022 DEBUGFUNC("e1000_led_off_ich8lan");
5024 if (hw->phy.type == e1000_phy_ife)
5025 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5026 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5028 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5029 return E1000_SUCCESS;
5033 * e1000_setup_led_pchlan - Configures SW controllable LED
5034 * @hw: pointer to the HW structure
5036 * This prepares the SW controllable LED for use.
5038 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5040 DEBUGFUNC("e1000_setup_led_pchlan");
5042 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5043 (u16)hw->mac.ledctl_mode1);
5047 * e1000_cleanup_led_pchlan - Restore the default LED operation
5048 * @hw: pointer to the HW structure
5050 * Return the LED back to the default configuration.
5052 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5054 DEBUGFUNC("e1000_cleanup_led_pchlan");
5056 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5057 (u16)hw->mac.ledctl_default);
/**
 *  e1000_led_on_pchlan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/
STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
	u16 data = (u16)hw->mac.ledctl_mode2;

	DEBUGFUNC("e1000_led_on_pchlan");

	/* If no link, then turn LED on by setting the invert bit
	 * for each LED that's mode is "link_up" in ledctl_mode2.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		/* Three LEDs, each configured by a 5-bit field in data */
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
			/* Flip this LED's invert (IVRT) bit */
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
				data |= (E1000_PHY_LED0_IVRT << (i * 5));

	/* Write the (possibly adjusted) config to the PHY LED register */
	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
/**
 *  e1000_led_off_pchlan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/
STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
	u16 data = (u16)hw->mac.ledctl_mode1;

	DEBUGFUNC("e1000_led_off_pchlan");

	/* If no link, then turn LED off by clearing the invert bit
	 * for each LED that's mode is "link_up" in ledctl_mode1.
	 */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		/* Three LEDs, each configured by a 5-bit field in data */
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
			/* Flip this LED's invert (IVRT) bit */
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
				data |= (E1000_PHY_LED0_IVRT << (i * 5));

	/* Write the (possibly adjusted) config to the PHY LED register */
	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
/**
 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Read appropriate register for the config done bit for completion status
 *  and configure the PHY through s/w for EEPROM-less parts.
 *
 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
 *  config done bit, so only an error is logged and continues. If we were
 *  to return with error, EEPROM-less silicon would not be able to be reset
 *  or change link.
 **/
STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	e1000_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
		ret_val = e1000_get_auto_rd_done_generic(hw);
			/* When auto config read does not complete, do not
			 * return with an error. This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;

	/* Clear PHY Reset Asserted bit */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA)
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000_phy_init_script_igp3(hw);
		/* No valid NVM bank found -> treat the EEPROM as absent */
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
5186 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5187 * @hw: pointer to the HW structure
5189 * In the case of a PHY power down to save power, or to turn off link during a
5190 * driver unload, or wake on lan is not enabled, remove the link.
5192 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5194 /* If the management interface is not enabled, then power down */
5195 if (!(hw->mac.ops.check_mng_mode(hw) ||
5196 hw->phy.ops.check_reset_block(hw)))
5197 e1000_power_down_phy_copper(hw);
/**
 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 *  @hw: pointer to the HW structure
 *
 *  Clears hardware counters specific to the silicon family and calls
 *  clear_hw_cntrs_generic to clear all general purpose counters.
 **/
STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");

	e1000_clear_hw_cntrs_base_generic(hw);

	/* Reads below clear the counters; values are discarded on purpose */
	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		/* Select the PHY page that holds the HV statistics regs */
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		/* Read upper/lower halves of each PHY counter to clear it */
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);

		hw->phy.ops.release(hw);