1 /*******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 /* 82562G 10/100 Network Connection
35 * 82562G-2 10/100 Network Connection
36 * 82562GT 10/100 Network Connection
37 * 82562GT-2 10/100 Network Connection
38 * 82562V 10/100 Network Connection
39 * 82562V-2 10/100 Network Connection
40 * 82566DC-2 Gigabit Network Connection
41 * 82566DC Gigabit Network Connection
42 * 82566DM-2 Gigabit Network Connection
43 * 82566DM Gigabit Network Connection
44 * 82566MC Gigabit Network Connection
45 * 82566MM Gigabit Network Connection
46 * 82567LM Gigabit Network Connection
47 * 82567LF Gigabit Network Connection
48 * 82567V Gigabit Network Connection
49 * 82567LM-2 Gigabit Network Connection
50 * 82567LF-2 Gigabit Network Connection
51 * 82567V-2 Gigabit Network Connection
52 * 82567LF-3 Gigabit Network Connection
53 * 82567LM-3 Gigabit Network Connection
54 * 82567LM-4 Gigabit Network Connection
55 * 82577LM Gigabit Network Connection
56 * 82577LC Gigabit Network Connection
57 * 82578DM Gigabit Network Connection
58 * 82578DC Gigabit Network Connection
59 * 82579LM Gigabit Network Connection
60 * 82579V Gigabit Network Connection
61 * Ethernet Connection I217-LM
62 * Ethernet Connection I217-V
63 * Ethernet Connection I218-V
64 * Ethernet Connection I218-LM
65 #ifdef NAHUM6_LPTH_I218_HW
66 * Ethernet Connection (2) I218-LM
67 * Ethernet Connection (2) I218-V
70 * Ethernet Connection (3) I218-LM
71 * Ethernet Connection (3) I218-V
75 #include "e1000_api.h"
77 #if defined(NAHUM6LP_HW) && defined(ULP_IN_D0_SUPPORT)
78 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
79 #endif /* NAHUM6LP_HW && ULP_SUPPORT */
80 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
81 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
82 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
83 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
84 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
85 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
86 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
87 STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
88 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
89 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
90 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
93 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
94 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
95 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
96 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
97 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
99 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
101 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
102 u16 words, u16 *data);
103 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
104 u16 words, u16 *data);
105 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
106 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
107 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
109 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
110 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
111 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
112 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
113 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
114 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
115 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
116 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
117 u16 *speed, u16 *duplex);
118 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
119 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
120 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
121 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
122 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
123 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
124 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw);
125 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw);
126 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
127 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
128 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
129 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
130 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
131 u32 offset, u8 *data);
132 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
134 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
135 u32 offset, u16 *data);
136 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
137 u32 offset, u8 byte);
138 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
139 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
140 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
141 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
142 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
143 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
145 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
146 /* Offset 04h HSFSTS */
147 union ich8_hws_flash_status {
149 u16 flcdone:1; /* bit 0 Flash Cycle Done */
150 u16 flcerr:1; /* bit 1 Flash Cycle Error */
151 u16 dael:1; /* bit 2 Direct Access error Log */
152 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
153 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
154 u16 reserved1:2; /* bit 13:6 Reserved */
155 u16 reserved2:6; /* bit 13:6 Reserved */
156 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
157 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
162 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
163 /* Offset 06h FLCTL */
164 union ich8_hws_flash_ctrl {
165 struct ich8_hsflctl {
166 u16 flcgo:1; /* 0 Flash Cycle Go */
167 u16 flcycle:2; /* 2:1 Flash Cycle */
168 u16 reserved:5; /* 7:3 Reserved */
169 u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
170 u16 flockdn:6; /* 15:10 Reserved */
175 /* ICH Flash Region Access Permissions */
176 union ich8_hws_flash_regacc {
178 u32 grra:8; /* 0:7 GbE region Read Access */
179 u32 grwa:8; /* 8:15 GbE region Write Access */
180 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
181 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
187 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
188 * @hw: pointer to the HW structure
190 * Test access to the PHY registers by reading the PHY ID registers. If
191 * the PHY ID is already known (e.g. resume path) compare it with known ID,
192 * otherwise assume the read PHY ID is correct if it is valid.
194 * Assumes the sw/fw/hw semaphore is already acquired.
196 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
204 for (retry_count = 0; retry_count < 2; retry_count++) {
205 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
206 if (ret_val || (phy_reg == 0xFFFF))
208 phy_id = (u32)(phy_reg << 16);
210 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
211 if (ret_val || (phy_reg == 0xFFFF)) {
215 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
220 if (hw->phy.id == phy_id)
224 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
228 /* In case the PHY needs to be in mdio slow mode,
229 * set slow mode and try to get the PHY id again.
231 if (hw->mac.type < e1000_pch_lpt) {
232 hw->phy.ops.release(hw);
233 ret_val = e1000_set_mdio_slow_mode_hv(hw);
235 ret_val = e1000_get_phy_id(hw);
236 hw->phy.ops.acquire(hw);
242 if (hw->mac.type == e1000_pch_lpt) {
243 /* Unforce SMBus mode in PHY */
244 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
245 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
246 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
248 /* Unforce SMBus mode in MAC */
249 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
250 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
251 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
258 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
259 * @hw: pointer to the HW structure
261 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
262 * used to reset the PHY to a quiescent state when necessary.
264 void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
268 DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
270 /* Set Phy Config Counter to 50msec */
271 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
272 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
273 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
274 E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
276 /* Toggle LANPHYPC Value bit */
277 mac_reg = E1000_READ_REG(hw, E1000_CTRL);
278 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
279 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
280 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
281 E1000_WRITE_FLUSH(hw);
283 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
284 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
285 E1000_WRITE_FLUSH(hw);
287 if (hw->mac.type < e1000_pch_lpt) {
294 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
295 E1000_CTRL_EXT_LPCD) && count--);
302 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
303 * @hw: pointer to the HW structure
305 * Workarounds/flow necessary for PHY initialization during driver load
308 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
310 u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
313 DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
315 /* Gate automatic PHY configuration by hardware on managed and
316 * non-managed 82579 and newer adapters.
318 e1000_gate_hw_phy_config_ich8lan(hw, true);
320 #if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
321 /* It is not possible to be certain of the current state of ULP
322 * so forcibly disable it.
324 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
326 #endif /* NAHUM6LP_HW && ULP_SUPPORT */
327 ret_val = hw->phy.ops.acquire(hw);
329 DEBUGOUT("Failed to initialize PHY flow\n");
333 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
334 * inaccessible and resetting the PHY is not blocked, toggle the
335 * LANPHYPC Value bit to force the interconnect to PCIe mode.
337 switch (hw->mac.type) {
339 if (e1000_phy_is_accessible_pchlan(hw))
342 /* Before toggling LANPHYPC, see if PHY is accessible by
343 * forcing MAC to SMBus mode first.
345 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
346 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
347 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
349 /* Wait 50 milliseconds for MAC to finish any retries
350 * that it might be trying to perform from previous
351 * attempts to acknowledge any phy read requests.
357 if (e1000_phy_is_accessible_pchlan(hw))
362 if ((hw->mac.type == e1000_pchlan) &&
363 (fwsm & E1000_ICH_FWSM_FW_VALID))
366 if (hw->phy.ops.check_reset_block(hw)) {
367 DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
368 ret_val = -E1000_ERR_PHY;
372 /* Toggle LANPHYPC Value bit */
373 e1000_toggle_lanphypc_pch_lpt(hw);
374 if (hw->mac.type >= e1000_pch_lpt) {
375 if (e1000_phy_is_accessible_pchlan(hw))
378 /* Toggling LANPHYPC brings the PHY out of SMBus mode
379 * so ensure that the MAC is also out of SMBus mode
381 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
382 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
383 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
385 if (e1000_phy_is_accessible_pchlan(hw))
388 ret_val = -E1000_ERR_PHY;
395 hw->phy.ops.release(hw);
398 /* Check to see if able to reset PHY. Print error if not */
399 if (hw->phy.ops.check_reset_block(hw)) {
400 ERROR_REPORT("Reset blocked by ME\n");
404 /* Reset the PHY before any access to it. Doing so, ensures
405 * that the PHY is in a known good state before we read/write
406 * PHY registers. The generic reset is sufficient here,
407 * because we haven't determined the PHY type yet.
409 ret_val = e1000_phy_hw_reset_generic(hw);
413 /* On a successful reset, possibly need to wait for the PHY
414 * to quiesce to an accessible state before returning control
415 * to the calling function. If the PHY does not quiesce, then
416 * return E1000E_BLK_PHY_RESET, as this is the condition that
419 ret_val = hw->phy.ops.check_reset_block(hw);
421 ERROR_REPORT("ME blocked access to PHY after reset\n");
425 /* Ungate automatic PHY configuration on non-managed 82579 */
426 if ((hw->mac.type == e1000_pch2lan) &&
427 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
429 e1000_gate_hw_phy_config_ich8lan(hw, false);
436 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
437 * @hw: pointer to the HW structure
439 * Initialize family-specific PHY parameters and function pointers.
441 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
443 struct e1000_phy_info *phy = &hw->phy;
446 DEBUGFUNC("e1000_init_phy_params_pchlan");
449 phy->reset_delay_us = 100;
451 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
452 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
453 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
454 phy->ops.set_page = e1000_set_page_igp;
455 phy->ops.read_reg = e1000_read_phy_reg_hv;
456 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
457 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
458 phy->ops.release = e1000_release_swflag_ich8lan;
459 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
460 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
461 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
462 phy->ops.write_reg = e1000_write_phy_reg_hv;
463 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
464 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
465 phy->ops.power_up = e1000_power_up_phy_copper;
466 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
467 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
469 phy->id = e1000_phy_unknown;
471 ret_val = e1000_init_phy_workarounds_pchlan(hw);
475 if (phy->id == e1000_phy_unknown)
476 switch (hw->mac.type) {
478 ret_val = e1000_get_phy_id(hw);
481 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
486 /* In case the PHY needs to be in mdio slow mode,
487 * set slow mode and try to get the PHY id again.
489 ret_val = e1000_set_mdio_slow_mode_hv(hw);
492 ret_val = e1000_get_phy_id(hw);
497 phy->type = e1000_get_phy_type_from_id(phy->id);
500 case e1000_phy_82577:
501 case e1000_phy_82579:
503 phy->ops.check_polarity = e1000_check_polarity_82577;
504 phy->ops.force_speed_duplex =
505 e1000_phy_force_speed_duplex_82577;
506 phy->ops.get_cable_length = e1000_get_cable_length_82577;
507 phy->ops.get_info = e1000_get_phy_info_82577;
508 phy->ops.commit = e1000_phy_sw_reset_generic;
510 case e1000_phy_82578:
511 phy->ops.check_polarity = e1000_check_polarity_m88;
512 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
513 phy->ops.get_cable_length = e1000_get_cable_length_m88;
514 phy->ops.get_info = e1000_get_phy_info_m88;
517 ret_val = -E1000_ERR_PHY;
525 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
526 * @hw: pointer to the HW structure
528 * Initialize family-specific PHY parameters and function pointers.
530 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
532 struct e1000_phy_info *phy = &hw->phy;
536 DEBUGFUNC("e1000_init_phy_params_ich8lan");
539 phy->reset_delay_us = 100;
541 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
542 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
543 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
544 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
545 phy->ops.read_reg = e1000_read_phy_reg_igp;
546 phy->ops.release = e1000_release_swflag_ich8lan;
547 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
548 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
549 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
550 phy->ops.write_reg = e1000_write_phy_reg_igp;
551 phy->ops.power_up = e1000_power_up_phy_copper;
552 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
554 /* We may need to do this twice - once for IGP and if that fails,
555 * we'll set BM func pointers and try again
557 ret_val = e1000_determine_phy_address(hw);
559 phy->ops.write_reg = e1000_write_phy_reg_bm;
560 phy->ops.read_reg = e1000_read_phy_reg_bm;
561 ret_val = e1000_determine_phy_address(hw);
563 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
569 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
572 ret_val = e1000_get_phy_id(hw);
579 case IGP03E1000_E_PHY_ID:
580 phy->type = e1000_phy_igp_3;
581 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
582 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
583 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
584 phy->ops.get_info = e1000_get_phy_info_igp;
585 phy->ops.check_polarity = e1000_check_polarity_igp;
586 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
589 case IFE_PLUS_E_PHY_ID:
591 phy->type = e1000_phy_ife;
592 phy->autoneg_mask = E1000_ALL_NOT_GIG;
593 phy->ops.get_info = e1000_get_phy_info_ife;
594 phy->ops.check_polarity = e1000_check_polarity_ife;
595 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
597 case BME1000_E_PHY_ID:
598 phy->type = e1000_phy_bm;
599 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
600 phy->ops.read_reg = e1000_read_phy_reg_bm;
601 phy->ops.write_reg = e1000_write_phy_reg_bm;
602 phy->ops.commit = e1000_phy_sw_reset_generic;
603 phy->ops.get_info = e1000_get_phy_info_m88;
604 phy->ops.check_polarity = e1000_check_polarity_m88;
605 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
608 return -E1000_ERR_PHY;
612 return E1000_SUCCESS;
616 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
617 * @hw: pointer to the HW structure
619 * Initialize family-specific NVM parameters and function
622 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
624 struct e1000_nvm_info *nvm = &hw->nvm;
625 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
626 u32 gfpreg, sector_base_addr, sector_end_addr;
629 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
631 /* Can't read flash registers if the register set isn't mapped. */
632 if (!hw->flash_address) {
633 DEBUGOUT("ERROR: Flash registers not mapped\n");
634 return -E1000_ERR_CONFIG;
637 nvm->type = e1000_nvm_flash_sw;
639 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
641 /* sector_X_addr is a "sector"-aligned address (4096 bytes)
642 * Add 1 to sector_end_addr since this sector is included in
645 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
646 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
648 /* flash_base_addr is byte-aligned */
649 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
651 /* find total size of the NVM, then cut in half since the total
652 * size represents two separate NVM banks.
654 nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
655 << FLASH_SECTOR_ADDR_SHIFT);
656 nvm->flash_bank_size /= 2;
657 /* Adjust to word count */
658 nvm->flash_bank_size /= sizeof(u16);
660 nvm->word_size = E1000_SHADOW_RAM_WORDS;
662 /* Clear shadow ram */
663 for (i = 0; i < nvm->word_size; i++) {
664 dev_spec->shadow_ram[i].modified = false;
665 dev_spec->shadow_ram[i].value = 0xFFFF;
668 E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
669 E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
671 /* Function Pointers */
672 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
673 nvm->ops.release = e1000_release_nvm_ich8lan;
674 nvm->ops.read = e1000_read_nvm_ich8lan;
675 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
676 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
677 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
678 nvm->ops.write = e1000_write_nvm_ich8lan;
680 return E1000_SUCCESS;
684 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
685 * @hw: pointer to the HW structure
687 * Initialize family-specific MAC parameters and function
690 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
692 struct e1000_mac_info *mac = &hw->mac;
693 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
695 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
697 DEBUGFUNC("e1000_init_mac_params_ich8lan");
699 /* Set media type function pointer */
700 hw->phy.media_type = e1000_media_type_copper;
702 /* Set mta register count */
703 mac->mta_reg_count = 32;
704 /* Set rar entry count */
705 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
706 if (mac->type == e1000_ich8lan)
707 mac->rar_entry_count--;
708 /* Set if part includes ASF firmware */
709 mac->asf_firmware_present = true;
711 mac->has_fwsm = true;
712 /* ARC subsystem not supported */
713 mac->arc_subsystem_valid = false;
714 /* Adaptive IFS supported */
715 mac->adaptive_ifs = true;
717 /* Function pointers */
719 /* bus type/speed/width */
720 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
722 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
724 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
725 /* hw initialization */
726 mac->ops.init_hw = e1000_init_hw_ich8lan;
728 mac->ops.setup_link = e1000_setup_link_ich8lan;
729 /* physical interface setup */
730 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
732 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
734 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
735 /* multicast address update */
736 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
737 /* clear hardware counters */
738 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
740 /* LED and other operations */
745 /* check management mode */
746 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
748 mac->ops.id_led_init = e1000_id_led_init_generic;
750 mac->ops.blink_led = e1000_blink_led_generic;
752 mac->ops.setup_led = e1000_setup_led_generic;
754 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
755 /* turn on/off LED */
756 mac->ops.led_on = e1000_led_on_ich8lan;
757 mac->ops.led_off = e1000_led_off_ich8lan;
760 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
761 mac->ops.rar_set = e1000_rar_set_pch2lan;
764 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
765 /* multicast address update for pch2 */
766 mac->ops.update_mc_addr_list =
767 e1000_update_mc_addr_list_pch2lan;
770 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
771 /* save PCH revision_id */
772 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
773 hw->revision_id = (u8)(pci_cfg &= 0x000F);
774 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
775 /* check management mode */
776 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
778 mac->ops.id_led_init = e1000_id_led_init_pchlan;
780 mac->ops.setup_led = e1000_setup_led_pchlan;
782 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
783 /* turn on/off LED */
784 mac->ops.led_on = e1000_led_on_pchlan;
785 mac->ops.led_off = e1000_led_off_pchlan;
791 if (mac->type == e1000_pch_lpt) {
792 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
793 mac->ops.rar_set = e1000_rar_set_pch_lpt;
794 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
797 /* Enable PCS Lock-loss workaround for ICH8 */
798 if (mac->type == e1000_ich8lan)
799 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
801 return E1000_SUCCESS;
805 * __e1000_access_emi_reg_locked - Read/write EMI register
806 * @hw: pointer to the HW structure
807 * @addr: EMI address to program
808 * @data: pointer to value to read/write from/to the EMI address
809 * @read: boolean flag to indicate read or write
811 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
813 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
814 u16 *data, bool read)
818 DEBUGFUNC("__e1000_access_emi_reg_locked");
820 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
825 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
828 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
835 * e1000_read_emi_reg_locked - Read Extended Management Interface register
836 * @hw: pointer to the HW structure
837 * @addr: EMI address to program
838 * @data: value to be read from the EMI address
840 * Assumes the SW/FW/HW Semaphore is already acquired.
842 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
844 DEBUGFUNC("e1000_read_emi_reg_locked");
846 return __e1000_access_emi_reg_locked(hw, addr, data, true);
850 * e1000_write_emi_reg_locked - Write Extended Management Interface register
851 * @hw: pointer to the HW structure
852 * @addr: EMI address to program
853 * @data: value to be written to the EMI address
855 * Assumes the SW/FW/HW Semaphore is already acquired.
857 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
859 DEBUGFUNC("e1000_read_emi_reg_locked");
861 return __e1000_access_emi_reg_locked(hw, addr, &data, false);
865 * e1000_set_eee_pchlan - Enable/disable EEE support
866 * @hw: pointer to the HW structure
868 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
869 * the link and the EEE capabilities of the link partner. The LPI Control
870 * register bits will remain set only if/when link is up.
872 * EEE LPI must not be asserted earlier than one second after link is up.
873 * On 82579, EEE LPI should not be enabled until such time otherwise there
874 * can be link issues with some switches. Other devices can have EEE LPI
875 * enabled immediately upon link up since they have a timer in hardware which
876 * prevents LPI from being asserted too early.
878 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
880 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
882 u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
884 DEBUGFUNC("e1000_set_eee_pchlan");
886 switch (hw->phy.type) {
887 case e1000_phy_82579:
888 lpa = I82579_EEE_LP_ABILITY;
889 pcs_status = I82579_EEE_PCS_STATUS;
890 adv_addr = I82579_EEE_ADVERTISEMENT;
893 lpa = I217_EEE_LP_ABILITY;
894 pcs_status = I217_EEE_PCS_STATUS;
895 adv_addr = I217_EEE_ADVERTISEMENT;
898 return E1000_SUCCESS;
901 ret_val = hw->phy.ops.acquire(hw);
905 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
909 /* Clear bits that enable EEE in various speeds */
910 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
912 /* Enable EEE if not disabled by user */
913 if (!dev_spec->eee_disable) {
914 /* Save off link partner's EEE ability */
915 ret_val = e1000_read_emi_reg_locked(hw, lpa,
916 &dev_spec->eee_lp_ability);
920 /* Read EEE advertisement */
921 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
925 /* Enable EEE only for speeds in which the link partner is
926 * EEE capable and for which we advertise EEE.
928 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
929 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
931 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
932 hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
933 if (data & NWAY_LPAR_100TX_FD_CAPS)
934 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
936 /* EEE is not supported in 100Half, so ignore
937 * partner's EEE in 100 ability if full-duplex
940 dev_spec->eee_lp_ability &=
941 ~I82579_EEE_100_SUPPORTED;
945 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
946 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
950 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
952 hw->phy.ops.release(hw);
958 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
959 * @hw: pointer to the HW structure
960 * @link: link up bool flag
962 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
963 * preventing further DMA write requests. Workaround the issue by disabling
964 * the de-assertion of the clock request when in 1Gpbs mode.
965 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
966 * speeds in order to avoid Tx hangs.
968 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
970 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
971 u32 status = E1000_READ_REG(hw, E1000_STATUS);
972 s32 ret_val = E1000_SUCCESS;
975 if (link && (status & E1000_STATUS_SPEED_1000)) {
976 ret_val = hw->phy.ops.acquire(hw);
981 e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
987 e1000_write_kmrn_reg_locked(hw,
988 E1000_KMRNCTRLSTA_K1_CONFIG,
990 ~E1000_KMRNCTRLSTA_K1_ENABLE);
996 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
997 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1000 e1000_write_kmrn_reg_locked(hw,
1001 E1000_KMRNCTRLSTA_K1_CONFIG,
1004 hw->phy.ops.release(hw);
1006 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1007 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1009 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1010 (status & E1000_STATUS_FD)))
1011 goto update_fextnvm6;
1013 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, ®);
1017 /* Clear link status transmit timeout */
1018 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1020 if (status & E1000_STATUS_SPEED_100) {
1021 /* Set inband Tx timeout to 5x10us for 100Half */
1022 reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1024 /* Do not extend the K1 entry latency for 100Half */
1025 fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1027 /* Set inband Tx timeout to 50x10us for 10Full/Half */
1029 I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1031 /* Extend the K1 entry latency for 10 Mbps */
1032 fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1035 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1040 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1048 * e1000_demote_ltr - Demote/Promote the LTR value
1049 * @hw: pointer to the HW structure
1050 * @demote: boolean value to control whether we are demoting or promoting
1051 * the LTR value (promoting allows deeper C-States).
1052 * @link: boolean value stating whether we currently have link
1054 * Configure the LTRV register with the proper LTR value
1056 void e1000_demote_ltr(struct e1000_hw *hw, bool demote, bool link)
1058 u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1059 link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1061 if ((hw->device_id != E1000_DEV_ID_PCH_I218_LM3) &&
1062 (hw->device_id != E1000_DEV_ID_PCH_I218_V3))
1066 reg |= hw->dev_spec.ich8lan.lat_enc |
1067 (hw->dev_spec.ich8lan.lat_enc <<
1068 E1000_LTRV_NOSNOOP_SHIFT);
1070 reg |= hw->dev_spec.ich8lan.max_ltr_enc |
1071 (hw->dev_spec.ich8lan.max_ltr_enc <<
1072 E1000_LTRV_NOSNOOP_SHIFT);
1075 E1000_WRITE_REG(hw, E1000_LTRV, reg);
1079 #endif /* C10_SUPPORT */
1080 #if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
1082 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1083 * @hw: pointer to the HW structure
1084 * @to_sx: boolean indicating a system power state transition to Sx
1086 * When link is down, configure ULP mode to significantly reduce the power
1087 * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
1088 * ME firmware to start the ULP configuration. If not on an ME enabled
1089 * system, configure the ULP mode by software.
1091 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1094 s32 ret_val = E1000_SUCCESS;
1097 if ((hw->mac.type < e1000_pch_lpt) ||
1098 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1099 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1100 #ifdef NAHUM6_LPTH_I218_HW
1101 (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1102 (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1104 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1107 #ifdef ULP_IN_D0_SUPPORT
1111 /* Poll up to 5 seconds for Cable Disconnected indication */
1112 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1113 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1114 /* Bail if link is re-acquired */
1115 if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1116 return -E1000_ERR_PHY;
1123 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1124 (E1000_READ_REG(hw, E1000_FEXT) &
1125 E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1129 #endif /* ULP_IN_D0_SUPPORT */
1130 if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1131 /* Request ME configure ULP mode in the PHY */
1132 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1133 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1134 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1139 #ifndef ULP_IN_D0_SUPPORT
1143 /* Poll up to 5 seconds for Cable Disconnected indication */
1144 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1145 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1146 /* Bail if link is re-acquired */
1147 if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1148 return -E1000_ERR_PHY;
1155 DEBUGOUT("CABLE_DISCONNECTED %s set after %dmsec\n",
1156 (E1000_READ_REG(hw, E1000_FEXT) &
1157 E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1161 #endif /* !ULP_IN_D0_SUPPORT */
1162 ret_val = hw->phy.ops.acquire(hw);
1166 /* Force SMBus mode in PHY */
1167 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1170 phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1171 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1173 /* Force SMBus mode in MAC */
1174 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1175 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1176 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1178 #ifdef ULP_IN_D0_SUPPORT
1180 /* Change the 'Link Status Change' interrupt to trigger
1181 * on 'Cable Status Change'
1183 ret_val = e1000_read_kmrn_reg_locked(hw,
1184 E1000_KMRNCTRLSTA_OP_MODES,
1188 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1189 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1193 #endif /* ULP_IN_D0_SUPPORT */
1194 /* Set Inband ULP Exit, Reset to SMBus mode and
1195 * Disable SMBus Release on PERST# in PHY
1197 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1200 phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1201 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1203 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1204 phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1206 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1208 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1210 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1212 /* Set Disable SMBus Release on PERST# in MAC */
1213 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1214 mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1215 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1217 /* Commit ULP changes in PHY by starting auto ULP configuration */
1218 phy_reg |= I218_ULP_CONFIG1_START;
1219 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1220 #ifdef ULP_IN_D0_SUPPORT
1223 /* Disable Tx so that the MAC doesn't send any (buffered)
1224 * packets to the PHY.
1226 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1227 mac_reg &= ~E1000_TCTL_EN;
1228 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1232 hw->phy.ops.release(hw);
1235 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1237 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1243 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1244 * @hw: pointer to the HW structure
1245 * @force: boolean indicating whether or not to force disabling ULP
1247 * Un-configure ULP mode when link is up, the system is transitioned from
1248 * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
1249 * system, poll for an indication from ME that ULP has been un-configured.
1250 * If not on an ME enabled system, un-configure the ULP mode by software.
1252 * During nominal operation, this function is called when link is acquired
1253 * to disable ULP mode (force=false); otherwise, for example when unloading
1254 * the driver or during Sx->S0 transitions, this is called with force=true
1255 * to forcibly disable ULP.
1256 #ifdef ULP_IN_D0_SUPPORT
1258 * When the cable is plugged in while the device is in D0, a Cable Status
1259 * Change interrupt is generated which causes this function to be called
1260 * to partially disable ULP mode and restart autonegotiation. This function
1261 * is then called again due to the resulting Link Status Change interrupt
1262 * to finish cleaning up after the ULP flow.
1265 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1267 s32 ret_val = E1000_SUCCESS;
1272 if ((hw->mac.type < e1000_pch_lpt) ||
1273 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1274 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1275 #ifdef NAHUM6_LPTH_I218_HW
1276 (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1277 (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1279 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1282 if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1284 /* Request ME un-configure ULP mode in the PHY */
1285 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1286 mac_reg &= ~E1000_H2ME_ULP;
1287 mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1288 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1291 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1292 while (E1000_READ_REG(hw, E1000_FWSM) &
1293 E1000_FWSM_ULP_CFG_DONE) {
1295 ret_val = -E1000_ERR_PHY;
1301 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1304 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1305 mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1306 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1308 /* Clear H2ME.ULP after ME ULP configuration */
1309 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1310 mac_reg &= ~E1000_H2ME_ULP;
1311 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1312 #ifdef ULP_IN_D0_SUPPORT
1314 /* Restore link speed advertisements and restart
1317 ret_val = e1000_phy_setup_autoneg(hw);
1321 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1329 /* Toggle LANPHYPC Value bit */
1330 e1000_toggle_lanphypc_pch_lpt(hw);
1332 ret_val = hw->phy.ops.acquire(hw);
1336 #ifdef ULP_IN_D0_SUPPORT
1337 /* Revert the change to the 'Link Status Change'
1338 * interrupt to trigger on 'Cable Status Change'
1340 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1344 phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1345 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1348 /* Unforce SMBus mode in PHY */
1349 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1351 /* The MAC might be in PCIe mode, so temporarily force to
1352 * SMBus mode in order to access the PHY.
1354 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1355 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1356 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1360 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1365 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1366 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1368 /* Unforce SMBus mode in MAC */
1369 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1370 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1371 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1373 /* When ULP mode was previously entered, K1 was disabled by the
1374 * hardware. Re-Enable K1 in the PHY when exiting ULP.
1376 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1379 phy_reg |= HV_PM_CTRL_K1_ENABLE;
1380 e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1382 /* Clear ULP enabled configuration */
1383 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1386 #ifdef ULP_IN_D0_SUPPORT
1387 /* CSC interrupt received due to ULP Indication */
1388 if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1390 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1391 I218_ULP_CONFIG1_STICKY_ULP |
1392 I218_ULP_CONFIG1_RESET_TO_SMBUS |
1393 I218_ULP_CONFIG1_WOL_HOST |
1394 I218_ULP_CONFIG1_INBAND_EXIT |
1395 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1396 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1398 /* Commit ULP changes by starting auto ULP configuration */
1399 phy_reg |= I218_ULP_CONFIG1_START;
1400 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1402 /* Clear Disable SMBus Release on PERST# in MAC */
1403 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1404 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1405 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1407 #ifdef ULP_IN_D0_SUPPORT
1409 hw->phy.ops.release(hw);
1411 if (hw->mac.autoneg)
1412 e1000_phy_setup_autoneg(hw);
1414 e1000_sw_lcd_config_ich8lan(hw);
1416 e1000_oem_bits_config_ich8lan(hw, true);
1418 /* Set ULP state to unknown and return non-zero to
1419 * indicate no link (yet) and re-enter on the next LSC
1420 * to finish disabling ULP flow.
1422 hw->dev_spec.ich8lan.ulp_state =
1423 e1000_ulp_state_unknown;
1430 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1431 mac_reg |= E1000_TCTL_EN;
1432 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1436 hw->phy.ops.release(hw);
1438 hw->phy.ops.reset(hw);
1443 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1445 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1450 #endif /* NAHUM6LP_HW && ULP_SUPPORT */
1452 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1453 * @hw: pointer to the HW structure
1455 * Checks to see of the link status of the hardware has changed. If a
1456 * change in link status has been detected, then we read the PHY registers
1457 * to get the current speed/duplex if link exists.
1459 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1461 struct e1000_mac_info *mac = &hw->mac;
1463 #if defined(NAHUM6LP_HW) && defined(ULP_IN_D0_SUPPORT)
1470 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1472 /* We only want to go out to the PHY registers to see if Auto-Neg
1473 * has completed and/or if our link status has changed. The
1474 * get_link_status flag is set upon receiving a Link Status
1475 * Change or Rx Sequence Error interrupt.
1477 if (!mac->get_link_status)
1478 return E1000_SUCCESS;
1480 #if defined(NAHUM6LP_HW) && defined(ULP_IN_D0_SUPPORT)
1481 if ((hw->mac.type < e1000_pch_lpt) ||
1482 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1483 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1484 #endif /* NAHUM6LP_HW && ULP_IN_D0_SUPPORT */
1485 /* First we want to see if the MII Status Register reports
1486 * link. If so, then we want to get the current speed/duplex
1489 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1492 #if defined(NAHUM6LP_HW) && defined(ULP_IN_D0_SUPPORT)
1494 /* Check the MAC's STATUS register to determine link state
1495 * since the PHY could be inaccessible while in ULP mode.
1497 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1499 ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1501 ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1506 #endif /* NAHUM6LP_HW && ULP_IN_D0_SUPPORT */
1508 if (hw->mac.type == e1000_pchlan) {
1509 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1514 /* When connected at 10Mbps half-duplex, 82579 parts are excessively
1515 * aggressive resulting in many collisions. To avoid this, increase
1516 * the IPG and reduce Rx latency in the PHY.
1518 if ((hw->mac.type == e1000_pch2lan) && link) {
1520 reg = E1000_READ_REG(hw, E1000_STATUS);
1521 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1522 reg = E1000_READ_REG(hw, E1000_TIPG);
1523 reg &= ~E1000_TIPG_IPGT_MASK;
1525 E1000_WRITE_REG(hw, E1000_TIPG, reg);
1527 /* Reduce Rx latency in analog PHY */
1528 ret_val = hw->phy.ops.acquire(hw);
1532 ret_val = e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
1534 hw->phy.ops.release(hw);
1541 #if defined(NAHUM6LP_HW) && defined(NAHUM6_WPT_HW)
1542 /* Work-around I218 hang issue */
1543 if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1544 (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1545 (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1546 (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1547 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1552 /* Work-around I218 hang issue */
1553 if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1554 (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
1555 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1560 #endif /* defined(NAHUM6LP_HW) && defined(NAHUM6_WPT_HW) */
1561 /* Clear link partner's EEE ability */
1562 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1565 return E1000_SUCCESS; /* No link detected */
1567 mac->get_link_status = false;
1569 switch (hw->mac.type) {
1571 ret_val = e1000_k1_workaround_lv(hw);
1576 if (hw->phy.type == e1000_phy_82578) {
1577 ret_val = e1000_link_stall_workaround_hv(hw);
1582 /* Workaround for PCHx parts in half-duplex:
1583 * Set the number of preambles removed from the packet
1584 * when it is passed from the PHY to the MAC to prevent
1585 * the MAC from misinterpreting the packet type.
1587 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1588 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1590 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1592 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1594 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1600 /* Check if there was DownShift, must be checked
1601 * immediately after link-up
1603 e1000_check_downshift_generic(hw);
1605 /* Enable/Disable EEE after link up */
1606 if (hw->phy.type > e1000_phy_82579) {
1607 ret_val = e1000_set_eee_pchlan(hw);
1612 /* If we are forcing speed/duplex, then we simply return since
1613 * we have already determined whether we have link or not.
1616 return -E1000_ERR_CONFIG;
1618 /* Auto-Neg is enabled. Auto Speed Detection takes care
1619 * of MAC speed/duplex configuration. So we only need to
1620 * configure Collision Distance in the MAC.
1622 mac->ops.config_collision_dist(hw);
1624 /* Configure Flow Control now that Auto-Neg has completed.
1625 * First, we need to restore the desired flow control
1626 * settings because we may have had to re-autoneg with a
1627 * different link partner.
1629 ret_val = e1000_config_fc_after_link_up_generic(hw);
1631 DEBUGOUT("Error configuring flow control\n");
1637 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1638 * @hw: pointer to the HW structure
1640 * Initialize family-specific function pointers for PHY, MAC, and NVM.
1642 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1644 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1646 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1647 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1648 switch (hw->mac.type) {
1651 case e1000_ich10lan:
1652 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1657 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1665 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1666 * @hw: pointer to the HW structure
1668 * Acquires the mutex for performing NVM operations.
1670 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1672 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1674 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1676 return E1000_SUCCESS;
1680 * e1000_release_nvm_ich8lan - Release NVM mutex
1681 * @hw: pointer to the HW structure
1683 * Releases the mutex used while performing NVM operations.
1685 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1687 DEBUGFUNC("e1000_release_nvm_ich8lan");
1689 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1695 * e1000_acquire_swflag_ich8lan - Acquire software control flag
1696 * @hw: pointer to the HW structure
1698 * Acquires the software control flag for performing PHY and select
1701 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1703 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1704 s32 ret_val = E1000_SUCCESS;
1706 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1708 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1711 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1712 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1720 DEBUGOUT("SW has already locked the resource.\n");
1721 ret_val = -E1000_ERR_CONFIG;
1725 timeout = SW_FLAG_TIMEOUT;
1727 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1728 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1731 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1732 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1740 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1741 E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1742 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1743 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1744 ret_val = -E1000_ERR_CONFIG;
1750 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1756 * e1000_release_swflag_ich8lan - Release software control flag
1757 * @hw: pointer to the HW structure
1759 * Releases the software control flag for performing PHY and select
1762 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1766 DEBUGFUNC("e1000_release_swflag_ich8lan");
1768 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1770 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1771 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1772 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1774 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1777 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1783 * e1000_check_mng_mode_ich8lan - Checks management mode
1784 * @hw: pointer to the HW structure
1786 * This checks if the adapter has any manageability enabled.
1787 * This is a function pointer entry point only called by read/write
1788 * routines for the PHY and NVM parts.
1790 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1794 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1796 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1798 return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
1799 ((fwsm & E1000_FWSM_MODE_MASK) ==
1800 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
1804 * e1000_check_mng_mode_pchlan - Checks management mode
1805 * @hw: pointer to the HW structure
1807 * This checks if the adapter has iAMT enabled.
1808 * This is a function pointer entry point only called by read/write
1809 * routines for the PHY and NVM parts.
1811 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1815 DEBUGFUNC("e1000_check_mng_mode_pchlan");
1817 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1819 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1820 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1824 * e1000_rar_set_pch2lan - Set receive address register
1825 * @hw: pointer to the HW structure
1826 * @addr: pointer to the receive address
1827 * @index: receive address array register
1829 * Sets the receive address array register at index to the address passed
1830 * in by addr. For 82579, RAR[0] is the base address register that is to
1831 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1832 * Use SHRA[0-3] in place of those reserved for ME.
1834 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1836 u32 rar_low, rar_high;
1838 DEBUGFUNC("e1000_rar_set_pch2lan");
1840 /* HW expects these in little endian so we reverse the byte order
1841 * from network order (big endian) to little endian
1843 rar_low = ((u32) addr[0] |
1844 ((u32) addr[1] << 8) |
1845 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1847 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1849 /* If MAC address zero, no need to set the AV bit */
1850 if (rar_low || rar_high)
1851 rar_high |= E1000_RAH_AV;
1854 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1855 E1000_WRITE_FLUSH(hw);
1856 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1857 E1000_WRITE_FLUSH(hw);
1861 /* RAR[1-6] are owned by manageability. Skip those and program the
1862 * next address into the SHRA register array.
1864 if (index < (u32) (hw->mac.rar_entry_count - 6)) {
1867 ret_val = e1000_acquire_swflag_ich8lan(hw);
1871 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1872 E1000_WRITE_FLUSH(hw);
1873 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1874 E1000_WRITE_FLUSH(hw);
1876 e1000_release_swflag_ich8lan(hw);
1878 /* verify the register updates */
1879 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1880 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1883 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1884 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1888 DEBUGOUT1("Failed to write receive address at index %d\n", index);
1892 * e1000_rar_set_pch_lpt - Set receive address registers
1893 * @hw: pointer to the HW structure
1894 * @addr: pointer to the receive address
1895 * @index: receive address array register
1897 * Sets the receive address register array at index to the address passed
1898 * in by addr. For LPT, RAR[0] is the base address register that is to
1899 * contain the MAC address. SHRA[0-10] are the shared receive address
1900 * registers that are shared between the Host and manageability engine (ME).
1902 STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1904 u32 rar_low, rar_high;
1907 DEBUGFUNC("e1000_rar_set_pch_lpt");
1909 /* HW expects these in little endian so we reverse the byte order
1910 * from network order (big endian) to little endian
1912 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1913 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1915 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1917 /* If MAC address zero, no need to set the AV bit */
1918 if (rar_low || rar_high)
1919 rar_high |= E1000_RAH_AV;
1922 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1923 E1000_WRITE_FLUSH(hw);
1924 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1925 E1000_WRITE_FLUSH(hw);
1929 /* The manageability engine (ME) can lock certain SHRAR registers that
1930 * it is using - those registers are unavailable for use.
1932 if (index < hw->mac.rar_entry_count) {
1933 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1934 E1000_FWSM_WLOCK_MAC_MASK;
1935 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1937 /* Check if all SHRAR registers are locked */
1941 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1944 ret_val = e1000_acquire_swflag_ich8lan(hw);
1949 E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1951 E1000_WRITE_FLUSH(hw);
1952 E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1954 E1000_WRITE_FLUSH(hw);
1956 e1000_release_swflag_ich8lan(hw);
1958 /* verify the register updates */
1959 if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1960 (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1966 DEBUGOUT1("Failed to write receive address at index %d\n", index);
1969 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1971 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1972 * @hw: pointer to the HW structure
1973 * @mc_addr_list: array of multicast addresses to program
1974 * @mc_addr_count: number of multicast addresses to program
1976 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1977 * The caller must have a packed mc_addr_list of multicast addresses.
1979 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1987 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1989 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1991 ret_val = hw->phy.ops.acquire(hw);
1995 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1999 for (i = 0; i < hw->mac.mta_reg_count; i++) {
2000 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2001 (u16)(hw->mac.mta_shadow[i] &
2003 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2004 (u16)((hw->mac.mta_shadow[i] >> 16) &
2008 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2011 hw->phy.ops.release(hw);
2014 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
2016 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2017 * @hw: pointer to the HW structure
2019 * Checks if firmware is blocking the reset of the PHY.
2020 * This is a function pointer entry point only called by
2023 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2026 bool blocked = false;
2029 DEBUGFUNC("e1000_check_reset_block_ich8lan");
2032 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2033 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2039 } while (blocked && (i++ < 10));
2040 return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2044 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2045 * @hw: pointer to the HW structure
2047 * Assumes semaphore already acquired.
2050 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2053 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2054 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2055 E1000_STRAP_SMT_FREQ_SHIFT;
2058 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2060 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2064 phy_data &= ~HV_SMB_ADDR_MASK;
2065 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2066 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2068 if (hw->phy.type == e1000_phy_i217) {
2069 /* Restore SMBus frequency */
2071 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2072 phy_data |= (freq & (1 << 0)) <<
2073 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2074 phy_data |= (freq & (1 << 1)) <<
2075 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2077 DEBUGOUT("Unsupported SMB frequency in PHY\n");
2081 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2085 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2086 * @hw: pointer to the HW structure
2088 * SW should configure the LCD from the NVM extended configuration region
2089 * as a workaround for certain parts.
2091 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2093 struct e1000_phy_info *phy = &hw->phy;
2094 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2095 s32 ret_val = E1000_SUCCESS;
2096 u16 word_addr, reg_data, reg_addr, phy_page = 0;
2098 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2100 /* Initialize the PHY from the NVM on ICH platforms. This
2101 * is needed due to an issue where the NVM configuration is
2102 * not properly autoloaded after power transitions.
2103 * Therefore, after each PHY reset, we will load the
2104 * configuration data out of the NVM manually.
2106 switch (hw->mac.type) {
2108 if (phy->type != e1000_phy_igp_3)
2111 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2112 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2113 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2120 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2126 ret_val = hw->phy.ops.acquire(hw);
2130 data = E1000_READ_REG(hw, E1000_FEXTNVM);
2131 if (!(data & sw_cfg_mask))
2134 /* Make sure HW does not configure LCD from PHY
2135 * extended configuration before SW configuration
2137 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2138 if ((hw->mac.type < e1000_pch2lan) &&
2139 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2142 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2143 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2144 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2148 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2149 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2151 if (((hw->mac.type == e1000_pchlan) &&
2152 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2153 (hw->mac.type > e1000_pchlan)) {
2154 /* HW configures the SMBus address and LEDs when the
2155 * OEM and LCD Write Enable bits are set in the NVM.
2156 * When both NVM bits are cleared, SW will configure
2159 ret_val = e1000_write_smbus_addr(hw);
2163 data = E1000_READ_REG(hw, E1000_LEDCTL);
2164 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2170 /* Configure LCD from extended configuration region. */
2172 /* cnf_base_addr is in DWORD */
2173 word_addr = (u16)(cnf_base_addr << 1);
2175 for (i = 0; i < cnf_size; i++) {
2176 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2181 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2186 /* Save off the PHY page for future writes. */
2187 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2188 phy_page = reg_data;
2192 reg_addr &= PHY_REG_MASK;
2193 reg_addr |= phy_page;
2195 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2202 hw->phy.ops.release(hw);
2207 * e1000_k1_gig_workaround_hv - K1 Si workaround
2208 * @hw: pointer to the HW structure
2209 * @link: link up bool flag
2211 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2212 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
2213 * If link is down, the function will restore the default K1 setting located
2216 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2218 s32 ret_val = E1000_SUCCESS;
2220 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2222 DEBUGFUNC("e1000_k1_gig_workaround_hv");
2224 if (hw->mac.type != e1000_pchlan)
2225 return E1000_SUCCESS;
2227 /* Wrap the whole flow with the sw flag */
2228 ret_val = hw->phy.ops.acquire(hw);
2232 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2234 if (hw->phy.type == e1000_phy_82578) {
2235 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2240 status_reg &= (BM_CS_STATUS_LINK_UP |
2241 BM_CS_STATUS_RESOLVED |
2242 BM_CS_STATUS_SPEED_MASK);
2244 if (status_reg == (BM_CS_STATUS_LINK_UP |
2245 BM_CS_STATUS_RESOLVED |
2246 BM_CS_STATUS_SPEED_1000))
2250 if (hw->phy.type == e1000_phy_82577) {
2251 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2256 status_reg &= (HV_M_STATUS_LINK_UP |
2257 HV_M_STATUS_AUTONEG_COMPLETE |
2258 HV_M_STATUS_SPEED_MASK);
2260 if (status_reg == (HV_M_STATUS_LINK_UP |
2261 HV_M_STATUS_AUTONEG_COMPLETE |
2262 HV_M_STATUS_SPEED_1000))
2266 /* Link stall fix for link up */
2267 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2273 /* Link stall fix for link down */
2274 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2280 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2283 hw->phy.ops.release(hw);
2289 * e1000_configure_k1_ich8lan - Configure K1 power state
2290 * @hw: pointer to the HW structure
2291 * @enable: K1 state to configure
2293 * Configure the K1 power state based on the provided parameter.
2294 * Assumes semaphore already acquired.
2296 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2298 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2306 DEBUGFUNC("e1000_configure_k1_ich8lan");
2308 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2314 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2316 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2318 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2324 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2325 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2327 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2328 reg |= E1000_CTRL_FRCSPD;
2329 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2331 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2332 E1000_WRITE_FLUSH(hw);
2334 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2335 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2336 E1000_WRITE_FLUSH(hw);
2339 return E1000_SUCCESS;
2343 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2344 * @hw: pointer to the HW structure
2345 * @d0_state: boolean if entering d0 or d3 device state
2347 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2348 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
2349 * in NVM determines whether HW should configure LPLU and Gbe Disable.
2351 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2357 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2359 if (hw->mac.type < e1000_pchlan)
2362 ret_val = hw->phy.ops.acquire(hw);
2366 if (hw->mac.type == e1000_pchlan) {
2367 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2368 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2372 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2373 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2376 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2378 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2382 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2385 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2386 oem_reg |= HV_OEM_BITS_GBE_DIS;
2388 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2389 oem_reg |= HV_OEM_BITS_LPLU;
2391 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2392 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2393 oem_reg |= HV_OEM_BITS_GBE_DIS;
2395 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2396 E1000_PHY_CTRL_NOND0A_LPLU))
2397 oem_reg |= HV_OEM_BITS_LPLU;
2400 /* Set Restart auto-neg to activate the bits */
2401 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2402 !hw->phy.ops.check_reset_block(hw))
2403 oem_reg |= HV_OEM_BITS_RESTART_AN;
2405 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2408 hw->phy.ops.release(hw);
2415 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2416 * @hw: pointer to the HW structure
2418 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2423 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2425 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2429 data |= HV_KMRN_MDIO_SLOW;
2431 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2437 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2438 * done after every PHY reset.
2440 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2442 s32 ret_val = E1000_SUCCESS;
2445 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2447 if (hw->mac.type != e1000_pchlan)
2448 return E1000_SUCCESS;
2450 /* Set MDIO slow mode before any other MDIO access */
2451 if (hw->phy.type == e1000_phy_82577) {
2452 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2457 if (((hw->phy.type == e1000_phy_82577) &&
2458 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2459 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2460 /* Disable generation of early preamble */
2461 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2465 /* Preamble tuning for SSC */
2466 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2472 if (hw->phy.type == e1000_phy_82578) {
2473 /* Return registers to default by doing a soft reset then
2474 * writing 0x3140 to the control register.
2476 if (hw->phy.revision < 2) {
2477 e1000_phy_sw_reset_generic(hw);
2478 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2484 ret_val = hw->phy.ops.acquire(hw);
2489 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2490 hw->phy.ops.release(hw);
2494 /* Configure the K1 Si workaround during phy reset assuming there is
2495 * link so that it disables K1 if link is in 1Gbps.
2497 ret_val = e1000_k1_gig_workaround_hv(hw, true);
2501 /* Workaround for link disconnects on a busy hub in half duplex */
2502 ret_val = hw->phy.ops.acquire(hw);
2505 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2508 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2513 /* set MSE higher to enable link to stay up when noise is high */
2514 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2516 hw->phy.ops.release(hw);
2522 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2523 * @hw: pointer to the HW structure
2525 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2531 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2533 ret_val = hw->phy.ops.acquire(hw);
2536 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2540 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2541 for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2542 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2543 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2544 (u16)(mac_reg & 0xFFFF));
2545 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2546 (u16)((mac_reg >> 16) & 0xFFFF));
2548 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2549 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2550 (u16)(mac_reg & 0xFFFF));
2551 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2552 (u16)((mac_reg & E1000_RAH_AV)
2556 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2559 hw->phy.ops.release(hw);
2562 #ifndef CRC32_OS_SUPPORT
2563 static u32 e1000_calc_rx_da_crc(u8 mac[])
2565 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
2566 u32 i, j, mask, crc;
2568 DEBUGFUNC("e1000_calc_rx_da_crc");
2571 for (i = 0; i < 6; i++) {
2573 for (j = 8; j > 0; j--) {
2574 mask = (crc & 1) * (-1);
2575 crc = (crc >> 1) ^ (poly & mask);
2581 #endif /* CRC32_OS_SUPPORT */
2583 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2585 * @hw: pointer to the HW structure
2586 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
2588 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2590 s32 ret_val = E1000_SUCCESS;
2595 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2597 if (hw->mac.type < e1000_pch2lan)
2598 return E1000_SUCCESS;
2600 /* disable Rx path while enabling/disabling workaround */
2601 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2602 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2603 phy_reg | (1 << 14));
2608 /* Write Rx addresses (rar_entry_count for RAL/H, and
2609 * SHRAL/H) and initial CRC values to the MAC
2611 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2612 u8 mac_addr[ETH_ADDR_LEN] = {0};
2613 u32 addr_high, addr_low;
2615 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2616 if (!(addr_high & E1000_RAH_AV))
2618 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2619 mac_addr[0] = (addr_low & 0xFF);
2620 mac_addr[1] = ((addr_low >> 8) & 0xFF);
2621 mac_addr[2] = ((addr_low >> 16) & 0xFF);
2622 mac_addr[3] = ((addr_low >> 24) & 0xFF);
2623 mac_addr[4] = (addr_high & 0xFF);
2624 mac_addr[5] = ((addr_high >> 8) & 0xFF);
2626 #ifndef CRC32_OS_SUPPORT
2627 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2628 e1000_calc_rx_da_crc(mac_addr));
2629 #else /* CRC32_OS_SUPPORT */
2630 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2631 E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2632 #endif /* CRC32_OS_SUPPORT */
2635 /* Write Rx addresses to the PHY */
2636 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2638 /* Enable jumbo frame workaround in the MAC */
2639 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2640 mac_reg &= ~(1 << 14);
2641 mac_reg |= (7 << 15);
2642 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2644 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2645 mac_reg |= E1000_RCTL_SECRC;
2646 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2648 ret_val = e1000_read_kmrn_reg_generic(hw,
2649 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2653 ret_val = e1000_write_kmrn_reg_generic(hw,
2654 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2658 ret_val = e1000_read_kmrn_reg_generic(hw,
2659 E1000_KMRNCTRLSTA_HD_CTRL,
2663 data &= ~(0xF << 8);
2665 ret_val = e1000_write_kmrn_reg_generic(hw,
2666 E1000_KMRNCTRLSTA_HD_CTRL,
2671 /* Enable jumbo frame workaround in the PHY */
2672 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2673 data &= ~(0x7F << 5);
2674 data |= (0x37 << 5);
2675 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2678 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2680 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2683 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2684 data &= ~(0x3FF << 2);
2685 data |= (0x1A << 2);
2686 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2689 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2692 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2693 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2698 /* Write MAC register values back to h/w defaults */
2699 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2700 mac_reg &= ~(0xF << 14);
2701 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2703 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2704 mac_reg &= ~E1000_RCTL_SECRC;
2705 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2707 ret_val = e1000_read_kmrn_reg_generic(hw,
2708 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2712 ret_val = e1000_write_kmrn_reg_generic(hw,
2713 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2717 ret_val = e1000_read_kmrn_reg_generic(hw,
2718 E1000_KMRNCTRLSTA_HD_CTRL,
2722 data &= ~(0xF << 8);
2724 ret_val = e1000_write_kmrn_reg_generic(hw,
2725 E1000_KMRNCTRLSTA_HD_CTRL,
2730 /* Write PHY register values back to h/w defaults */
2731 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2732 data &= ~(0x7F << 5);
2733 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2736 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2738 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2741 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2742 data &= ~(0x3FF << 2);
2744 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2747 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2750 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2751 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2757 /* re-enable Rx path after enabling/disabling workaround */
2758 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2763 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2764 * done after every PHY reset.
2766 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2768 s32 ret_val = E1000_SUCCESS;
2770 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2772 if (hw->mac.type != e1000_pch2lan)
2773 return E1000_SUCCESS;
2775 /* Set MDIO slow mode before any other MDIO access */
2776 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2780 ret_val = hw->phy.ops.acquire(hw);
2783 /* set MSE higher to enable link to stay up when noise is high */
2784 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2787 /* drop link after 5 times MSE threshold was reached */
2788 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2790 hw->phy.ops.release(hw);
2796 * e1000_k1_gig_workaround_lv - K1 Si workaround
2797 * @hw: pointer to the HW structure
2799 * Workaround to set the K1 beacon duration for 82579 parts
2801 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2803 s32 ret_val = E1000_SUCCESS;
2808 DEBUGFUNC("e1000_k1_workaround_lv");
2810 if (hw->mac.type != e1000_pch2lan)
2811 return E1000_SUCCESS;
2813 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
2814 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2818 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2819 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2820 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2821 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2823 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
2827 if (status_reg & HV_M_STATUS_SPEED_1000) {
2830 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
2831 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2832 /* LV 1G Packet drop issue wa */
2833 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2837 pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2838 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2843 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2844 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2846 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2847 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
2854 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2855 * @hw: pointer to the HW structure
2856 * @gate: boolean set to true to gate, false to ungate
2858 * Gate/ungate the automatic PHY configuration via hardware; perform
2859 * the configuration via software instead.
2861 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2865 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2867 if (hw->mac.type < e1000_pch2lan)
2870 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2873 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2875 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2877 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2881 * e1000_lan_init_done_ich8lan - Check for PHY config completion
2882 * @hw: pointer to the HW structure
2884 * Check the appropriate indication the MAC has finished configuring the
2885 * PHY after a software reset.
2887 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2889 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2891 DEBUGFUNC("e1000_lan_init_done_ich8lan");
2893 /* Wait for basic configuration completes before proceeding */
2895 data = E1000_READ_REG(hw, E1000_STATUS);
2896 data &= E1000_STATUS_LAN_INIT_DONE;
2898 } while ((!data) && --loop);
2900 /* If basic configuration is incomplete before the above loop
2901 * count reaches 0, loading the configuration from NVM will
2902 * leave the PHY in a bad state possibly resulting in no link.
2905 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2907 /* Clear the Init Done bit for the next init event */
2908 data = E1000_READ_REG(hw, E1000_STATUS);
2909 data &= ~E1000_STATUS_LAN_INIT_DONE;
2910 E1000_WRITE_REG(hw, E1000_STATUS, data);
2914 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2915 * @hw: pointer to the HW structure
2917 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2919 s32 ret_val = E1000_SUCCESS;
2922 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2924 if (hw->phy.ops.check_reset_block(hw))
2925 return E1000_SUCCESS;
2927 /* Allow time for h/w to get to quiescent state after reset */
2930 /* Perform any necessary post-reset workarounds */
2931 switch (hw->mac.type) {
2933 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2938 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2946 /* Clear the host wakeup bit after lcd reset */
2947 if (hw->mac.type >= e1000_pchlan) {
2948 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, ®);
2949 reg &= ~BM_WUC_HOST_WU_BIT;
2950 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2953 /* Configure the LCD with the extended configuration region in NVM */
2954 ret_val = e1000_sw_lcd_config_ich8lan(hw);
2958 /* Configure the LCD with the OEM bits in NVM */
2959 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2961 if (hw->mac.type == e1000_pch2lan) {
2962 /* Ungate automatic PHY configuration on non-managed 82579 */
2963 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2964 E1000_ICH_FWSM_FW_VALID)) {
2966 e1000_gate_hw_phy_config_ich8lan(hw, false);
2969 /* Set EEE LPI Update Timer to 200usec */
2970 ret_val = hw->phy.ops.acquire(hw);
2973 ret_val = e1000_write_emi_reg_locked(hw,
2974 I82579_LPI_UPDATE_TIMER,
2976 hw->phy.ops.release(hw);
2983 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2984 * @hw: pointer to the HW structure
2987 * This is a function pointer entry point called by drivers
2988 * or other shared routines.
2990 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2992 s32 ret_val = E1000_SUCCESS;
2994 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2996 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2997 if ((hw->mac.type == e1000_pch2lan) &&
2998 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2999 e1000_gate_hw_phy_config_ich8lan(hw, true);
3001 ret_val = e1000_phy_hw_reset_generic(hw);
3005 return e1000_post_phy_reset_ich8lan(hw);
3009 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3010 * @hw: pointer to the HW structure
3011 * @active: true to enable LPLU, false to disable
3013 * Sets the LPLU state according to the active flag. For PCH, if OEM write
3014 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
3015 * the phy speed. This function will manually set the LPLU bit and restart
3016 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
3017 * since it configures the same bit.
3019 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3024 DEBUGFUNC("e1000_set_lplu_state_pchlan");
3026 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3031 oem_reg |= HV_OEM_BITS_LPLU;
3033 oem_reg &= ~HV_OEM_BITS_LPLU;
3035 if (!hw->phy.ops.check_reset_block(hw))
3036 oem_reg |= HV_OEM_BITS_RESTART_AN;
3038 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3042 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3043 * @hw: pointer to the HW structure
3044 * @active: true to enable LPLU, false to disable
3046 * Sets the LPLU D0 state according to the active flag. When
3047 * activating LPLU this function also disables smart speed
3048 * and vice versa. LPLU will not be activated unless the
3049 * device autonegotiation advertisement meets standards of
3050 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3051 * This is a function pointer entry point only called by
3052 * PHY setup routines.
3054 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3056 struct e1000_phy_info *phy = &hw->phy;
3058 s32 ret_val = E1000_SUCCESS;
3061 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3063 if (phy->type == e1000_phy_ife)
3064 return E1000_SUCCESS;
3066 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3069 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3070 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3072 if (phy->type != e1000_phy_igp_3)
3073 return E1000_SUCCESS;
3075 /* Call gig speed drop workaround on LPLU before accessing
3078 if (hw->mac.type == e1000_ich8lan)
3079 e1000_gig_downshift_workaround_ich8lan(hw);
3081 /* When LPLU is enabled, we should disable SmartSpeed */
3082 ret_val = phy->ops.read_reg(hw,
3083 IGP01E1000_PHY_PORT_CONFIG,
3087 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3088 ret_val = phy->ops.write_reg(hw,
3089 IGP01E1000_PHY_PORT_CONFIG,
3094 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3095 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3097 if (phy->type != e1000_phy_igp_3)
3098 return E1000_SUCCESS;
3100 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3101 * during Dx states where the power conservation is most
3102 * important. During driver activity we should enable
3103 * SmartSpeed, so performance is maintained.
3105 if (phy->smart_speed == e1000_smart_speed_on) {
3106 ret_val = phy->ops.read_reg(hw,
3107 IGP01E1000_PHY_PORT_CONFIG,
3112 data |= IGP01E1000_PSCFR_SMART_SPEED;
3113 ret_val = phy->ops.write_reg(hw,
3114 IGP01E1000_PHY_PORT_CONFIG,
3118 } else if (phy->smart_speed == e1000_smart_speed_off) {
3119 ret_val = phy->ops.read_reg(hw,
3120 IGP01E1000_PHY_PORT_CONFIG,
3125 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3126 ret_val = phy->ops.write_reg(hw,
3127 IGP01E1000_PHY_PORT_CONFIG,
3134 return E1000_SUCCESS;
3138 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3139 * @hw: pointer to the HW structure
3140 * @active: true to enable LPLU, false to disable
3142 * Sets the LPLU D3 state according to the active flag. When
3143 * activating LPLU this function also disables smart speed
3144 * and vice versa. LPLU will not be activated unless the
3145 * device autonegotiation advertisement meets standards of
3146 * either 10 or 10/100 or 10/100/1000 at all duplexes.
3147 * This is a function pointer entry point only called by
3148 * PHY setup routines.
3150 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3152 struct e1000_phy_info *phy = &hw->phy;
3154 s32 ret_val = E1000_SUCCESS;
3157 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3159 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3162 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3163 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3165 if (phy->type != e1000_phy_igp_3)
3166 return E1000_SUCCESS;
3168 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3169 * during Dx states where the power conservation is most
3170 * important. During driver activity we should enable
3171 * SmartSpeed, so performance is maintained.
3173 if (phy->smart_speed == e1000_smart_speed_on) {
3174 ret_val = phy->ops.read_reg(hw,
3175 IGP01E1000_PHY_PORT_CONFIG,
3180 data |= IGP01E1000_PSCFR_SMART_SPEED;
3181 ret_val = phy->ops.write_reg(hw,
3182 IGP01E1000_PHY_PORT_CONFIG,
3186 } else if (phy->smart_speed == e1000_smart_speed_off) {
3187 ret_val = phy->ops.read_reg(hw,
3188 IGP01E1000_PHY_PORT_CONFIG,
3193 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3194 ret_val = phy->ops.write_reg(hw,
3195 IGP01E1000_PHY_PORT_CONFIG,
3200 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3201 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3202 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3203 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3204 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3206 if (phy->type != e1000_phy_igp_3)
3207 return E1000_SUCCESS;
3209 /* Call gig speed drop workaround on LPLU before accessing
3212 if (hw->mac.type == e1000_ich8lan)
3213 e1000_gig_downshift_workaround_ich8lan(hw);
3215 /* When LPLU is enabled, we should disable SmartSpeed */
3216 ret_val = phy->ops.read_reg(hw,
3217 IGP01E1000_PHY_PORT_CONFIG,
3222 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3223 ret_val = phy->ops.write_reg(hw,
3224 IGP01E1000_PHY_PORT_CONFIG,
3232 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3233 * @hw: pointer to the HW structure
3234 * @bank: pointer to the variable that returns the active bank
3236 * Reads signature byte from the NVM using the flash access registers.
3237 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3239 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3242 struct e1000_nvm_info *nvm = &hw->nvm;
3243 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3244 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3248 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3250 switch (hw->mac.type) {
3253 eecd = E1000_READ_REG(hw, E1000_EECD);
3254 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3255 E1000_EECD_SEC1VAL_VALID_MASK) {
3256 if (eecd & E1000_EECD_SEC1VAL)
3261 return E1000_SUCCESS;
3263 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3266 /* set bank to 0 in case flash read fails */
3270 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3274 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3275 E1000_ICH_NVM_SIG_VALUE) {
3277 return E1000_SUCCESS;
3281 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3286 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3287 E1000_ICH_NVM_SIG_VALUE) {
3289 return E1000_SUCCESS;
3292 DEBUGOUT("ERROR: No valid NVM bank present\n");
3293 return -E1000_ERR_NVM;
3298 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
3299 * @hw: pointer to the HW structure
3300 * @offset: The offset (in bytes) of the word(s) to read.
3301 * @words: Size of data to read in words
3302 * @data: Pointer to the word(s) to read at offset.
3304 * Reads a word(s) from the NVM using the flash access registers.
3306 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3309 struct e1000_nvm_info *nvm = &hw->nvm;
3310 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3312 s32 ret_val = E1000_SUCCESS;
3316 DEBUGFUNC("e1000_read_nvm_ich8lan");
3318 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3320 DEBUGOUT("nvm parameter(s) out of bounds\n");
3321 ret_val = -E1000_ERR_NVM;
3325 nvm->ops.acquire(hw);
3327 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3328 if (ret_val != E1000_SUCCESS) {
3329 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3333 act_offset = (bank) ? nvm->flash_bank_size : 0;
3334 act_offset += offset;
3336 ret_val = E1000_SUCCESS;
3337 for (i = 0; i < words; i++) {
3338 if (dev_spec->shadow_ram[offset+i].modified) {
3339 data[i] = dev_spec->shadow_ram[offset+i].value;
3341 ret_val = e1000_read_flash_word_ich8lan(hw,
3350 nvm->ops.release(hw);
3354 DEBUGOUT1("NVM read error: %d\n", ret_val);
3360 * e1000_flash_cycle_init_ich8lan - Initialize flash
3361 * @hw: pointer to the HW structure
3363 * This function does initial flash setup so that a new read/write/erase cycle
3366 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3368 union ich8_hws_flash_status hsfsts;
3369 s32 ret_val = -E1000_ERR_NVM;
3371 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3373 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3375 /* Check if the flash descriptor is valid */
3376 if (!hsfsts.hsf_status.fldesvalid) {
3377 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
3378 return -E1000_ERR_NVM;
3381 /* Clear FCERR and DAEL in hw status by writing 1 */
3382 hsfsts.hsf_status.flcerr = 1;
3383 hsfsts.hsf_status.dael = 1;
3385 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3387 /* Either we should have a hardware SPI cycle in progress
3388 * bit to check against, in order to start a new cycle or
3389 * FDONE bit should be changed in the hardware so that it
3390 * is 1 after hardware reset, which can then be used as an
3391 * indication whether a cycle is in progress or has been
3395 if (!hsfsts.hsf_status.flcinprog) {
3396 /* There is no cycle running at present,
3397 * so we can start a cycle.
3398 * Begin by setting Flash Cycle Done.
3400 hsfsts.hsf_status.flcdone = 1;
3401 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3402 ret_val = E1000_SUCCESS;
3406 /* Otherwise poll for sometime so the current
3407 * cycle has a chance to end before giving up.
3409 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3410 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3412 if (!hsfsts.hsf_status.flcinprog) {
3413 ret_val = E1000_SUCCESS;
3418 if (ret_val == E1000_SUCCESS) {
3419 /* Successful in waiting for previous cycle to timeout,
3420 * now set the Flash Cycle Done.
3422 hsfsts.hsf_status.flcdone = 1;
3423 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3426 DEBUGOUT("Flash controller busy, cannot get access\n");
3434 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3435 * @hw: pointer to the HW structure
3436 * @timeout: maximum time to wait for completion
3438 * This function starts a flash cycle and waits for its completion.
3440 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3442 union ich8_hws_flash_ctrl hsflctl;
3443 union ich8_hws_flash_status hsfsts;
3446 DEBUGFUNC("e1000_flash_cycle_ich8lan");
3448 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3449 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3450 hsflctl.hsf_ctrl.flcgo = 1;
3451 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3453 /* wait till FDONE bit is set to 1 */
3455 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3456 if (hsfsts.hsf_status.flcdone)
3459 } while (i++ < timeout);
3461 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3462 return E1000_SUCCESS;
3464 return -E1000_ERR_NVM;
3468 * e1000_read_flash_word_ich8lan - Read word from flash
3469 * @hw: pointer to the HW structure
3470 * @offset: offset to data location
3471 * @data: pointer to the location for storing the data
3473 * Reads the flash word at offset into data. Offset is converted
3474 * to bytes before read.
3476 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3479 DEBUGFUNC("e1000_read_flash_word_ich8lan");
3482 return -E1000_ERR_NVM;
3484 /* Must convert offset into bytes. */
3487 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3491 * e1000_read_flash_byte_ich8lan - Read byte from flash
3492 * @hw: pointer to the HW structure
3493 * @offset: The offset of the byte to read.
3494 * @data: Pointer to a byte to store the value read.
3496 * Reads a single byte from the NVM using the flash access registers.
3498 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3504 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3510 return E1000_SUCCESS;
3514 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
3515 * @hw: pointer to the HW structure
3516 * @offset: The offset (in bytes) of the byte or word to read.
3517 * @size: Size of data to read, 1=byte 2=word
3518 * @data: Pointer to the word to store the value read.
3520 * Reads a byte or word from the NVM using the flash access registers.
3522 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3525 union ich8_hws_flash_status hsfsts;
3526 union ich8_hws_flash_ctrl hsflctl;
3527 u32 flash_linear_addr;
3529 s32 ret_val = -E1000_ERR_NVM;
3532 DEBUGFUNC("e1000_read_flash_data_ich8lan");
3534 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3535 return -E1000_ERR_NVM;
3537 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3538 hw->nvm.flash_base_addr);
3543 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3544 if (ret_val != E1000_SUCCESS)
3547 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3548 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3549 hsflctl.hsf_ctrl.fldbcount = size - 1;
3550 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3551 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3553 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3556 e1000_flash_cycle_ich8lan(hw,
3557 ICH_FLASH_READ_COMMAND_TIMEOUT);
3559 /* Check if FCERR is set to 1, if set to 1, clear it
3560 * and try the whole sequence a few more times, else
3561 * read in (shift in) the Flash Data0, the order is
3562 * least significant byte first msb to lsb
3564 if (ret_val == E1000_SUCCESS) {
3565 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3567 *data = (u8)(flash_data & 0x000000FF);
3569 *data = (u16)(flash_data & 0x0000FFFF);
3572 /* If we've gotten here, then things are probably
3573 * completely hosed, but if the error condition is
3574 * detected, it won't hurt to give it another try...
3575 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3577 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3579 if (hsfsts.hsf_status.flcerr) {
3580 /* Repeat for some time before giving up. */
3582 } else if (!hsfsts.hsf_status.flcdone) {
3583 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3587 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3593 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
3594 * @hw: pointer to the HW structure
3595 * @offset: The offset (in bytes) of the word(s) to write.
3596 * @words: Size of data to write in words
3597 * @data: Pointer to the word(s) to write at offset.
3599 * Writes a byte or word to the NVM using the flash access registers.
3601 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3604 struct e1000_nvm_info *nvm = &hw->nvm;
3605 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3608 DEBUGFUNC("e1000_write_nvm_ich8lan");
3610 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3612 DEBUGOUT("nvm parameter(s) out of bounds\n");
3613 return -E1000_ERR_NVM;
3616 nvm->ops.acquire(hw);
3618 for (i = 0; i < words; i++) {
3619 dev_spec->shadow_ram[offset+i].modified = true;
3620 dev_spec->shadow_ram[offset+i].value = data[i];
3623 nvm->ops.release(hw);
3625 return E1000_SUCCESS;
3629 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3630 * @hw: pointer to the HW structure
3632 * The NVM checksum is updated by calling the generic update_nvm_checksum,
3633 * which writes the checksum to the shadow ram. The changes in the shadow
3634 * ram are then committed to the EEPROM by processing each bank at a time
3635 * checking for the modified bit and writing only the pending changes.
3636 * After a successful commit, the shadow ram is cleared and is ready for
3639 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3641 struct e1000_nvm_info *nvm = &hw->nvm;
3642 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3643 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3647 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3649 ret_val = e1000_update_nvm_checksum_generic(hw);
3653 if (nvm->type != e1000_nvm_flash_sw)
3656 nvm->ops.acquire(hw);
3658 /* We're writing to the opposite bank so if we're on bank 1,
3659 * write to bank 0 etc. We also need to erase the segment that
3660 * is going to be written
3662 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3663 if (ret_val != E1000_SUCCESS) {
3664 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3669 new_bank_offset = nvm->flash_bank_size;
3670 old_bank_offset = 0;
3671 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3675 old_bank_offset = nvm->flash_bank_size;
3676 new_bank_offset = 0;
3677 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3682 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3683 /* Determine whether to write the value stored
3684 * in the other NVM bank or a modified value stored
3687 if (dev_spec->shadow_ram[i].modified) {
3688 data = dev_spec->shadow_ram[i].value;
3690 ret_val = e1000_read_flash_word_ich8lan(hw, i +
3697 /* If the word is 0x13, then make sure the signature bits
3698 * (15:14) are 11b until the commit has completed.
3699 * This will allow us to write 10b which indicates the
3700 * signature is valid. We want to do this after the write
3701 * has completed so that we don't mark the segment valid
3702 * while the write is still in progress
3704 if (i == E1000_ICH_NVM_SIG_WORD)
3705 data |= E1000_ICH_NVM_SIG_MASK;
3707 /* Convert offset to bytes. */
3708 act_offset = (i + new_bank_offset) << 1;
3711 /* Write the bytes to the new bank. */
3712 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3719 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3726 /* Don't bother writing the segment valid bits if sector
3727 * programming failed.
3730 DEBUGOUT("Flash commit failed.\n");
3734 /* Finally validate the new segment by setting bit 15:14
3735 * to 10b in word 0x13 , this can be done without an
3736 * erase as well since these bits are 11 to start with
3737 * and we need to change bit 14 to 0b
3739 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3740 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3745 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3751 /* And invalidate the previously valid segment by setting
3752 * its signature word (0x13) high_byte to 0b. This can be
3753 * done without an erase because flash erase sets all bits
3754 * to 1's. We can write 1's to 0's without an erase
3756 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3757 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3761 /* Great! Everything worked, we can now clear the cached entries. */
3762 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3763 dev_spec->shadow_ram[i].modified = false;
3764 dev_spec->shadow_ram[i].value = 0xFFFF;
3768 nvm->ops.release(hw);
3770 /* Reload the EEPROM, or else modifications will not appear
3771 * until after the next adapter reset.
3774 nvm->ops.reload(hw);
3780 DEBUGOUT1("NVM update error: %d\n", ret_val);
3786 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3787 * @hw: pointer to the HW structure
3789 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3790 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
3791 * calculated, in which case we need to calculate the checksum and set bit 6.
3793 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3798 u16 valid_csum_mask;
3800 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3802 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
3803 * the checksum needs to be fixed. This bit is an indication that
3804 * the NVM was prepared by OEM software and did not calculate
3805 * the checksum...a likely scenario.
3807 switch (hw->mac.type) {
3810 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3813 word = NVM_FUTURE_INIT_WORD1;
3814 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3818 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3822 if (!(data & valid_csum_mask)) {
3823 data |= valid_csum_mask;
3824 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3827 ret_val = hw->nvm.ops.update(hw);
3832 return e1000_validate_nvm_checksum_generic(hw);
3836 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3837 * @hw: pointer to the HW structure
3838 * @offset: The offset (in bytes) of the byte/word to read.
3839 * @size: Size of data to read, 1=byte 2=word
3840 * @data: The byte(s) to write to the NVM.
3842 * Writes one/two bytes to the NVM using the flash access registers.
3844 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3847 union ich8_hws_flash_status hsfsts;
3848 union ich8_hws_flash_ctrl hsflctl;
3849 u32 flash_linear_addr;
3854 DEBUGFUNC("e1000_write_ich8_data");
3856 if (size < 1 || size > 2 || data > size * 0xff ||
3857 offset > ICH_FLASH_LINEAR_ADDR_MASK)
3858 return -E1000_ERR_NVM;
3860 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3861 hw->nvm.flash_base_addr);
3866 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3867 if (ret_val != E1000_SUCCESS)
3870 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3871 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3872 hsflctl.hsf_ctrl.fldbcount = size - 1;
3873 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3874 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3876 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3879 flash_data = (u32)data & 0x00FF;
3881 flash_data = (u32)data;
3883 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3885 /* check if FCERR is set to 1 , if set to 1, clear it
3886 * and try the whole sequence a few more times else done
3889 e1000_flash_cycle_ich8lan(hw,
3890 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3891 if (ret_val == E1000_SUCCESS)
3894 /* If we're here, then things are most likely
3895 * completely hosed, but if the error condition
3896 * is detected, it won't hurt to give it another
3897 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3899 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3900 if (hsfsts.hsf_status.flcerr)
3901 /* Repeat for some time before giving up. */
3903 if (!hsfsts.hsf_status.flcdone) {
3904 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3907 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3913 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3914 * @hw: pointer to the HW structure
3915 * @offset: The index of the byte to read.
3916 * @data: The byte to write to the NVM.
3918 * Writes a single byte to the NVM using the flash access registers.
3920 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3923 u16 word = (u16)data;
3925 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3927 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3931 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3932 * @hw: pointer to the HW structure
3933 * @offset: The offset of the byte to write.
3934 * @byte: The byte to write to the NVM.
3936 * Writes a single byte to the NVM using the flash access registers.
3937 * Goes through a retry algorithm before giving up.
3939 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3940 u32 offset, u8 byte)
3943 u16 program_retries;
3945 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3947 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3951 for (program_retries = 0; program_retries < 100; program_retries++) {
3952 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3954 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3955 if (ret_val == E1000_SUCCESS)
3958 if (program_retries == 100)
3959 return -E1000_ERR_NVM;
3961 return E1000_SUCCESS;
3965 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3966 * @hw: pointer to the HW structure
3967 * @bank: 0 for first bank, 1 for second bank, etc.
3969 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3970 * bank N is 4096 * N + flash_reg_addr.
3972 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3974 struct e1000_nvm_info *nvm = &hw->nvm;
3975 union ich8_hws_flash_status hsfsts;
3976 union ich8_hws_flash_ctrl hsflctl;
3977 u32 flash_linear_addr;
3978 /* bank size is in 16bit words - adjust to bytes */
3979 u32 flash_bank_size = nvm->flash_bank_size * 2;
3982 s32 j, iteration, sector_size;
3984 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3986 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3988 /* Determine HW Sector size: Read BERASE bits of hw flash status
3990 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3991 * consecutive sectors. The start index for the nth Hw sector
3992 * can be calculated as = bank * 4096 + n * 256
3993 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3994 * The start index for the nth Hw sector can be calculated
3996 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3997 * (ich9 only, otherwise error condition)
3998 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4000 switch (hsfsts.hsf_status.berasesz) {
4002 /* Hw sector size 256 */
4003 sector_size = ICH_FLASH_SEG_SIZE_256;
4004 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4007 sector_size = ICH_FLASH_SEG_SIZE_4K;
4011 sector_size = ICH_FLASH_SEG_SIZE_8K;
4015 sector_size = ICH_FLASH_SEG_SIZE_64K;
4019 return -E1000_ERR_NVM;
4022 /* Start with the base address, then add the sector offset. */
4023 flash_linear_addr = hw->nvm.flash_base_addr;
4024 flash_linear_addr += (bank) ? flash_bank_size : 0;
4026 for (j = 0; j < iteration; j++) {
4028 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4031 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4035 /* Write a value 11 (block Erase) in Flash
4036 * Cycle field in hw flash control
4038 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4040 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4041 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4044 /* Write the last 24 bits of an index within the
4045 * block into Flash Linear address field in Flash
4048 flash_linear_addr += (j * sector_size);
4049 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4052 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4053 if (ret_val == E1000_SUCCESS)
4056 /* Check if FCERR is set to 1. If 1,
4057 * clear it and try the whole sequence
4058 * a few more times else Done
4060 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4062 if (hsfsts.hsf_status.flcerr)
4063 /* repeat for some time before giving up */
4065 else if (!hsfsts.hsf_status.flcdone)
4067 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4070 return E1000_SUCCESS;
4074 * e1000_valid_led_default_ich8lan - Set the default LED settings
4075 * @hw: pointer to the HW structure
4076 * @data: Pointer to the LED settings
4078 * Reads the LED default settings from the NVM to data. If the NVM LED
4079 * settings is all 0's or F's, set the LED default to a valid LED default
4082 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4086 DEBUGFUNC("e1000_valid_led_default_ich8lan");
4088 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4090 DEBUGOUT("NVM Read Error\n");
4094 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4095 *data = ID_LED_DEFAULT_ICH8LAN;
4097 return E1000_SUCCESS;
4101 * e1000_id_led_init_pchlan - store LED configurations
4102 * @hw: pointer to the HW structure
4104 * PCH does not control LEDs via the LEDCTL register, rather it uses
4105 * the PHY LED configuration register.
4107 * PCH also does not have an "always on" or "always off" mode which
4108 * complicates the ID feature. Instead of using the "on" mode to indicate
4109 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4110 * use "link_up" mode. The LEDs will still ID on request if there is no
4111 * link based on logic in e1000_led_[on|off]_pchlan().
4113 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4115 struct e1000_mac_info *mac = &hw->mac;
4117 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4118 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4119 u16 data, i, temp, shift;
4121 DEBUGFUNC("e1000_id_led_init_pchlan");
4123 /* Get default ID LED modes */
4124 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4128 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4129 mac->ledctl_mode1 = mac->ledctl_default;
4130 mac->ledctl_mode2 = mac->ledctl_default;
4132 for (i = 0; i < 4; i++) {
4133 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4136 case ID_LED_ON1_DEF2:
4137 case ID_LED_ON1_ON2:
4138 case ID_LED_ON1_OFF2:
4139 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4140 mac->ledctl_mode1 |= (ledctl_on << shift);
4142 case ID_LED_OFF1_DEF2:
4143 case ID_LED_OFF1_ON2:
4144 case ID_LED_OFF1_OFF2:
4145 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4146 mac->ledctl_mode1 |= (ledctl_off << shift);
4153 case ID_LED_DEF1_ON2:
4154 case ID_LED_ON1_ON2:
4155 case ID_LED_OFF1_ON2:
4156 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4157 mac->ledctl_mode2 |= (ledctl_on << shift);
4159 case ID_LED_DEF1_OFF2:
4160 case ID_LED_ON1_OFF2:
4161 case ID_LED_OFF1_OFF2:
4162 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4163 mac->ledctl_mode2 |= (ledctl_off << shift);
4171 return E1000_SUCCESS;
4175 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4176 * @hw: pointer to the HW structure
4178 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4179 * register, so the the bus width is hard coded.
4181 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4183 struct e1000_bus_info *bus = &hw->bus;
4186 DEBUGFUNC("e1000_get_bus_info_ich8lan");
4188 ret_val = e1000_get_bus_info_pcie_generic(hw);
4190 /* ICH devices are "PCI Express"-ish. They have
4191 * a configuration space, but do not contain
4192 * PCI Express Capability registers, so bus width
4193 * must be hardcoded.
4195 if (bus->width == e1000_bus_width_unknown)
4196 bus->width = e1000_bus_width_pcie_x1;
4202 * e1000_reset_hw_ich8lan - Reset the hardware
4203 * @hw: pointer to the HW structure
4205 * Does a full reset of the hardware which includes a reset of the PHY and
4208 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4210 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4215 DEBUGFUNC("e1000_reset_hw_ich8lan");
4217 /* Prevent the PCI-E bus from sticking if there is no TLP connection
4218 * on the last TLP read/write transaction when MAC is reset.
4220 ret_val = e1000_disable_pcie_master_generic(hw);
4222 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4224 DEBUGOUT("Masking off all interrupts\n");
4225 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4227 /* Disable the Transmit and Receive units. Then delay to allow
4228 * any pending transactions to complete before we hit the MAC
4229 * with the global reset.
4231 E1000_WRITE_REG(hw, E1000_RCTL, 0);
4232 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4233 E1000_WRITE_FLUSH(hw);
4237 /* Workaround for ICH8 bit corruption issue in FIFO memory */
4238 if (hw->mac.type == e1000_ich8lan) {
4239 /* Set Tx and Rx buffer allocation to 8k apiece. */
4240 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4241 /* Set Packet Buffer Size to 16k. */
4242 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4245 if (hw->mac.type == e1000_pchlan) {
4246 /* Save the NVM K1 bit setting*/
4247 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4251 if (kum_cfg & E1000_NVM_K1_ENABLE)
4252 dev_spec->nvm_k1_enabled = true;
4254 dev_spec->nvm_k1_enabled = false;
4257 ctrl = E1000_READ_REG(hw, E1000_CTRL);
4259 if (!hw->phy.ops.check_reset_block(hw)) {
4260 /* Full-chip reset requires MAC and PHY reset at the same
4261 * time to make sure the interface between MAC and the
4262 * external PHY is reset.
4264 ctrl |= E1000_CTRL_PHY_RST;
4266 /* Gate automatic PHY configuration by hardware on
4269 if ((hw->mac.type == e1000_pch2lan) &&
4270 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4271 e1000_gate_hw_phy_config_ich8lan(hw, true);
4273 ret_val = e1000_acquire_swflag_ich8lan(hw);
4274 DEBUGOUT("Issuing a global reset to ich8lan\n");
4275 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4276 /* cannot issue a flush here because it hangs the hardware */
4279 /* Set Phy Config Counter to 50msec */
4280 if (hw->mac.type == e1000_pch2lan) {
4281 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4282 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4283 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4284 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4288 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4290 if (ctrl & E1000_CTRL_PHY_RST) {
4291 ret_val = hw->phy.ops.get_cfg_done(hw);
4295 ret_val = e1000_post_phy_reset_ich8lan(hw);
4300 /* For PCH, this write will make sure that any noise
4301 * will be detected as a CRC error and be dropped rather than show up
4302 * as a bad packet to the DMA engine.
4304 if (hw->mac.type == e1000_pchlan)
4305 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4307 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4308 E1000_READ_REG(hw, E1000_ICR);
4310 reg = E1000_READ_REG(hw, E1000_KABGTXD);
4311 reg |= E1000_KABGTXD_BGSQLBIAS;
4312 E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4314 return E1000_SUCCESS;
4318 * e1000_init_hw_ich8lan - Initialize the hardware
4319 * @hw: pointer to the HW structure
4321 * Prepares the hardware for transmit and receive by doing the following:
4322 * - initialize hardware bits
4323 * - initialize LED identification
4324 * - setup receive address registers
4325 * - setup flow control
4326 * - setup transmit descriptors
4327 * - clear statistics
4329 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4331 struct e1000_mac_info *mac = &hw->mac;
4332 u32 ctrl_ext, txdctl, snoop;
4336 DEBUGFUNC("e1000_init_hw_ich8lan");
4338 e1000_initialize_hw_bits_ich8lan(hw);
4340 /* Initialize identification LED */
4341 ret_val = mac->ops.id_led_init(hw);
4342 /* An error is not fatal and we should not stop init due to this */
4344 DEBUGOUT("Error initializing identification LED\n");
4346 /* Setup the receive address. */
4347 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4349 /* Zero out the Multicast HASH table */
4350 DEBUGOUT("Zeroing the MTA\n");
4351 for (i = 0; i < mac->mta_reg_count; i++)
4352 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4354 /* The 82578 Rx buffer will stall if wakeup is enabled in host and
4355 * the ME. Disable wakeup by clearing the host wakeup bit.
4356 * Reset the phy after disabling host wakeup to reset the Rx buffer.
4358 if (hw->phy.type == e1000_phy_82578) {
4359 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4360 i &= ~BM_WUC_HOST_WU_BIT;
4361 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4362 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4367 /* Setup link and flow control */
4368 ret_val = mac->ops.setup_link(hw);
4370 /* Set the transmit descriptor write-back policy for both queues */
4371 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4372 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4373 E1000_TXDCTL_FULL_TX_DESC_WB);
4374 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4375 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4376 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4377 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4378 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4379 E1000_TXDCTL_FULL_TX_DESC_WB);
4380 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4381 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4382 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4384 /* ICH8 has opposite polarity of no_snoop bits.
4385 * By default, we should use snoop behavior.
4387 if (mac->type == e1000_ich8lan)
4388 snoop = PCIE_ICH8_SNOOP_ALL;
4390 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4391 e1000_set_pcie_no_snoop_generic(hw, snoop);
4393 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4394 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4395 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4397 /* Clear all of the statistics registers (clear on read). It is
4398 * important that we do this after we have tried to establish link
4399 * because the symbol error count will increment wildly if there
4402 e1000_clear_hw_cntrs_ich8lan(hw);
4408 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4409 * @hw: pointer to the HW structure
4411 * Sets/Clears required hardware bits necessary for correctly setting up the
4412 * hardware for transmit and receive.
4414 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4418 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4420 /* Extended Device Control */
4421 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4423 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4424 if (hw->mac.type >= e1000_pchlan)
4425 reg |= E1000_CTRL_EXT_PHYPDEN;
4426 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4428 /* Transmit Descriptor Control 0 */
4429 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4431 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4433 /* Transmit Descriptor Control 1 */
4434 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4436 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4438 /* Transmit Arbitration Control 0 */
4439 reg = E1000_READ_REG(hw, E1000_TARC(0));
4440 if (hw->mac.type == e1000_ich8lan)
4441 reg |= (1 << 28) | (1 << 29);
4442 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4443 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4445 /* Transmit Arbitration Control 1 */
4446 reg = E1000_READ_REG(hw, E1000_TARC(1));
4447 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4451 reg |= (1 << 24) | (1 << 26) | (1 << 30);
4452 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4455 if (hw->mac.type == e1000_ich8lan) {
4456 reg = E1000_READ_REG(hw, E1000_STATUS);
4458 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4461 /* work-around descriptor data corruption issue during nfs v2 udp
4462 * traffic, just disable the nfs filtering capability
4464 reg = E1000_READ_REG(hw, E1000_RFCTL);
4465 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4467 /* Disable IPv6 extension header parsing because some malformed
4468 * IPv6 headers can hang the Rx.
4470 if (hw->mac.type == e1000_ich8lan)
4471 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4472 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4474 /* Enable ECC on Lynxpoint */
4475 if (hw->mac.type == e1000_pch_lpt) {
4476 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4477 reg |= E1000_PBECCSTS_ECC_ENABLE;
4478 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4480 reg = E1000_READ_REG(hw, E1000_CTRL);
4481 reg |= E1000_CTRL_MEHE;
4482 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4489 * e1000_setup_link_ich8lan - Setup flow control and link settings
4490 * @hw: pointer to the HW structure
4492 * Determines which flow control settings to use, then configures flow
4493 * control. Calls the appropriate media-specific link configuration
4494 * function. Assuming the adapter has a valid link partner, a valid link
4495 * should be established. Assumes the hardware has previously been reset
4496 * and the transmitter and receiver are not enabled.
4498 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4502 DEBUGFUNC("e1000_setup_link_ich8lan");
4504 if (hw->phy.ops.check_reset_block(hw))
4505 return E1000_SUCCESS;
4507 /* ICH parts do not have a word in the NVM to determine
4508 * the default flow control setting, so we explicitly
4511 if (hw->fc.requested_mode == e1000_fc_default)
4512 hw->fc.requested_mode = e1000_fc_full;
4514 /* Save off the requested flow control mode for use later. Depending
4515 * on the link partner's capabilities, we may or may not use this mode.
4517 hw->fc.current_mode = hw->fc.requested_mode;
4519 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4520 hw->fc.current_mode);
4522 /* Continue to configure the copper link. */
4523 ret_val = hw->mac.ops.setup_physical_interface(hw);
4527 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4528 if ((hw->phy.type == e1000_phy_82578) ||
4529 (hw->phy.type == e1000_phy_82579) ||
4530 (hw->phy.type == e1000_phy_i217) ||
4531 (hw->phy.type == e1000_phy_82577)) {
4532 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4534 ret_val = hw->phy.ops.write_reg(hw,
4535 PHY_REG(BM_PORT_CTRL_PAGE, 27),
4541 return e1000_set_fc_watermarks_generic(hw);
4545 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4546 * @hw: pointer to the HW structure
4548 * Configures the kumeran interface to the PHY to wait the appropriate time
4549 * when polling the PHY, then call the generic setup_copper_link to finish
4550 * configuring the copper link.
4552 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4558 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4560 ctrl = E1000_READ_REG(hw, E1000_CTRL);
4561 ctrl |= E1000_CTRL_SLU;
4562 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4563 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4565 /* Set the mac to wait the maximum time between each iteration
4566 * and increase the max iterations when polling the phy;
4567 * this fixes erroneous timeouts at 10Mbps.
4569 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4573 ret_val = e1000_read_kmrn_reg_generic(hw,
4574 E1000_KMRNCTRLSTA_INBAND_PARAM,
4579 ret_val = e1000_write_kmrn_reg_generic(hw,
4580 E1000_KMRNCTRLSTA_INBAND_PARAM,
4585 switch (hw->phy.type) {
4586 case e1000_phy_igp_3:
4587 ret_val = e1000_copper_link_setup_igp(hw);
4592 case e1000_phy_82578:
4593 ret_val = e1000_copper_link_setup_m88(hw);
4597 case e1000_phy_82577:
4598 case e1000_phy_82579:
4599 ret_val = e1000_copper_link_setup_82577(hw);
4604 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4609 reg_data &= ~IFE_PMC_AUTO_MDIX;
4611 switch (hw->phy.mdix) {
4613 reg_data &= ~IFE_PMC_FORCE_MDIX;
4616 reg_data |= IFE_PMC_FORCE_MDIX;
4620 reg_data |= IFE_PMC_AUTO_MDIX;
4623 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4632 return e1000_setup_copper_link_generic(hw);
4636 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4637 * @hw: pointer to the HW structure
4639 * Calls the PHY specific link setup function and then calls the
4640 * generic setup_copper_link to finish configuring the link for
4641 * Lynxpoint PCH devices
4643 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4648 DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4650 ctrl = E1000_READ_REG(hw, E1000_CTRL);
4651 ctrl |= E1000_CTRL_SLU;
4652 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4653 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4655 ret_val = e1000_copper_link_setup_82577(hw);
4659 return e1000_setup_copper_link_generic(hw);
4663 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4664 * @hw: pointer to the HW structure
4665 * @speed: pointer to store current link speed
4666 * @duplex: pointer to store the current link duplex
4668 * Calls the generic get_speed_and_duplex to retrieve the current link
4669 * information and then calls the Kumeran lock loss workaround for links at
4672 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4677 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4679 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4683 if ((hw->mac.type == e1000_ich8lan) &&
4684 (hw->phy.type == e1000_phy_igp_3) &&
4685 (*speed == SPEED_1000)) {
4686 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4693 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4694 * @hw: pointer to the HW structure
4696 * Work-around for 82566 Kumeran PCS lock loss:
4697 * On link status change (i.e. PCI reset, speed change) and link is up and
4699 * 0) if workaround is optionally disabled do nothing
4700 * 1) wait 1ms for Kumeran link to come up
4701 * 2) check Kumeran Diagnostic register PCS lock loss bit
4702 * 3) if not set the link is locked (all is good), otherwise...
4704 * 5) repeat up to 10 times
4705 * Note: this is only called for IGP3 copper when speed is 1gb.
4707 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4709 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4715 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4717 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4718 return E1000_SUCCESS;
4720 /* Make sure link is up before proceeding. If not just return.
4721 * Attempting this while link is negotiating fouled up link
4724 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4726 return E1000_SUCCESS;
4728 for (i = 0; i < 10; i++) {
4729 /* read once to clear */
4730 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4733 /* and again to get new status */
4734 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4738 /* check for PCS lock */
4739 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4740 return E1000_SUCCESS;
4742 /* Issue PHY reset */
4743 hw->phy.ops.reset(hw);
4746 /* Disable GigE link negotiation */
4747 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4748 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4749 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4750 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4752 /* Call gig speed drop workaround on Gig disable before accessing
4755 e1000_gig_downshift_workaround_ich8lan(hw);
4757 /* unable to acquire PCS lock */
4758 return -E1000_ERR_PHY;
4762 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4763 * @hw: pointer to the HW structure
4764 * @state: boolean value used to set the current Kumeran workaround state
4766 * If ICH8, set the current Kumeran workaround state (enabled - true
4767 * /disabled - false).
4769 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4772 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4774 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4776 if (hw->mac.type != e1000_ich8lan) {
4777 DEBUGOUT("Workaround applies to ICH8 only.\n");
4781 dev_spec->kmrn_lock_loss_workaround_enabled = state;
4787 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4788 * @hw: pointer to the HW structure
4790 * Workaround for 82566 power-down on D3 entry:
4791 * 1) disable gigabit link
4792 * 2) write VR power-down enable
4794 * Continue if successful, else issue LCD reset and repeat
4796 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4802 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4804 if (hw->phy.type != e1000_phy_igp_3)
4807 /* Try the workaround twice (if needed) */
4810 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4811 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4812 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4813 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4815 /* Call gig speed drop workaround on Gig disable before
4816 * accessing any PHY registers
4818 if (hw->mac.type == e1000_ich8lan)
4819 e1000_gig_downshift_workaround_ich8lan(hw);
4821 /* Write VR power-down enable */
4822 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4823 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4824 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4825 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4827 /* Read it back and test */
4828 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4829 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4830 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4833 /* Issue PHY reset and repeat at most one more time */
4834 reg = E1000_READ_REG(hw, E1000_CTRL);
4835 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4841 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4842 * @hw: pointer to the HW structure
4844 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
4845 * LPLU, Gig disable, MDIC PHY reset):
4846 * 1) Set Kumeran Near-end loopback
4847 * 2) Clear Kumeran Near-end loopback
4848 * Should only be called for ICH8[m] devices with any 1G Phy.
4850 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4855 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4857 if ((hw->mac.type != e1000_ich8lan) ||
4858 (hw->phy.type == e1000_phy_ife))
4861 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4865 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4866 ret_val = e1000_write_kmrn_reg_generic(hw,
4867 E1000_KMRNCTRLSTA_DIAG_OFFSET,
4871 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4872 e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4877 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4878 * @hw: pointer to the HW structure
4880 * During S0 to Sx transition, it is possible the link remains at gig
4881 * instead of negotiating to a lower speed. Before going to Sx, set
4882 * 'Gig Disable' to force link speed negotiation to a lower speed based on
4883 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
4884 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4885 * needs to be written.
4886 * Parts that support (and are linked to a partner which support) EEE in
4887 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4888 * than 10Mbps w/o EEE.
4890 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4892 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4896 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4898 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4899 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4901 if (hw->phy.type == e1000_phy_i217) {
4902 u16 phy_reg, device_id = hw->device_id;
4904 #ifdef NAHUM6_WPT_HW
4905 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4906 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
4907 (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
4908 (device_id == E1000_DEV_ID_PCH_I218_V3)) {
4909 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4911 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4912 fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4915 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4916 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
4917 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4919 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4920 fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4924 ret_val = hw->phy.ops.acquire(hw);
4928 if (!dev_spec->eee_disable) {
4932 e1000_read_emi_reg_locked(hw,
4933 I217_EEE_ADVERTISEMENT,
4938 /* Disable LPLU if both link partners support 100BaseT
4939 * EEE and 100Full is advertised on both ends of the
4940 * link, and enable Auto Enable LPI since there will
4941 * be no driver to enable LPI while in Sx.
4943 if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4944 (dev_spec->eee_lp_ability &
4945 I82579_EEE_100_SUPPORTED) &&
4946 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4947 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4948 E1000_PHY_CTRL_NOND0A_LPLU);
4950 /* Set Auto Enable LPI after link up */
4951 hw->phy.ops.read_reg_locked(hw,
4954 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4955 hw->phy.ops.write_reg_locked(hw,
4961 /* For i217 Intel Rapid Start Technology support,
4962 * when the system is going into Sx and no manageability engine
4963 * is present, the driver must configure proxy to reset only on
4964 * power good. LPI (Low Power Idle) state must also reset only
4965 * on power good, as well as the MTA (Multicast table array).
4966 * The SMBus release must also be disabled on LCD reset.
4968 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4969 E1000_ICH_FWSM_FW_VALID)) {
4970 /* Enable proxy to reset only on power good. */
4971 hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4973 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4974 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4977 /* Set bit enable LPI (EEE) to reset only on
4980 hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4981 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4982 hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4984 /* Disable the SMB release on LCD reset. */
4985 hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4986 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4987 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4990 /* Enable MTA to reset for Intel Rapid Start Technology
4993 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4994 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4995 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4998 hw->phy.ops.release(hw);
5001 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5003 if (hw->mac.type == e1000_ich8lan)
5004 e1000_gig_downshift_workaround_ich8lan(hw);
5006 if (hw->mac.type >= e1000_pchlan) {
5007 e1000_oem_bits_config_ich8lan(hw, false);
5009 /* Reset PHY to activate OEM bits on 82577/8 */
5010 if (hw->mac.type == e1000_pchlan)
5011 e1000_phy_hw_reset_generic(hw);
5013 ret_val = hw->phy.ops.acquire(hw);
5016 e1000_write_smbus_addr(hw);
5017 hw->phy.ops.release(hw);
5024 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5025 * @hw: pointer to the HW structure
5027 * During Sx to S0 transitions on non-managed devices or managed devices
5028 * on which PHY resets are not blocked, if the PHY registers cannot be
5029 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
5031 * On i217, setup Intel Rapid Start Technology.
5033 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5037 DEBUGFUNC("e1000_resume_workarounds_pchlan");
5039 if (hw->mac.type < e1000_pch2lan)
5042 ret_val = e1000_init_phy_workarounds_pchlan(hw);
5044 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5048 /* For i217 Intel Rapid Start Technology support when the system
5049 * is transitioning from Sx and no manageability engine is present
5050 * configure SMBus to restore on reset, disable proxy, and enable
5051 * the reset on MTA (Multicast table array).
5053 if (hw->phy.type == e1000_phy_i217) {
5056 ret_val = hw->phy.ops.acquire(hw);
5058 DEBUGOUT("Failed to setup iRST\n");
5062 /* Clear Auto Enable LPI after link up */
5063 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5064 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5065 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5067 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5068 E1000_ICH_FWSM_FW_VALID)) {
5069 /* Restore clear on SMB if no manageability engine
5072 ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5076 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5077 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5080 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5082 /* Enable reset on MTA */
5083 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5087 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5088 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5091 DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5092 hw->phy.ops.release(hw);
5097 * e1000_cleanup_led_ich8lan - Restore the default LED operation
5098 * @hw: pointer to the HW structure
5100 * Return the LED back to the default configuration.
5102 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5104 DEBUGFUNC("e1000_cleanup_led_ich8lan");
5106 if (hw->phy.type == e1000_phy_ife)
5107 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5110 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5111 return E1000_SUCCESS;
5115 * e1000_led_on_ich8lan - Turn LEDs on
5116 * @hw: pointer to the HW structure
5120 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5122 DEBUGFUNC("e1000_led_on_ich8lan");
5124 if (hw->phy.type == e1000_phy_ife)
5125 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5126 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5128 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5129 return E1000_SUCCESS;
5133 * e1000_led_off_ich8lan - Turn LEDs off
5134 * @hw: pointer to the HW structure
5136 * Turn off the LEDs.
5138 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5140 DEBUGFUNC("e1000_led_off_ich8lan");
5142 if (hw->phy.type == e1000_phy_ife)
5143 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5144 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5146 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5147 return E1000_SUCCESS;
5151 * e1000_setup_led_pchlan - Configures SW controllable LED
5152 * @hw: pointer to the HW structure
5154 * This prepares the SW controllable LED for use.
5156 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5158 DEBUGFUNC("e1000_setup_led_pchlan");
5160 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5161 (u16)hw->mac.ledctl_mode1);
5165 * e1000_cleanup_led_pchlan - Restore the default LED operation
5166 * @hw: pointer to the HW structure
5168 * Return the LED back to the default configuration.
5170 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5172 DEBUGFUNC("e1000_cleanup_led_pchlan");
5174 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5175 (u16)hw->mac.ledctl_default);
5179 * e1000_led_on_pchlan - Turn LEDs on
5180 * @hw: pointer to the HW structure
5184 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5186 u16 data = (u16)hw->mac.ledctl_mode2;
5189 DEBUGFUNC("e1000_led_on_pchlan");
5191 /* If no link, then turn LED on by setting the invert bit
5192 * for each LED that's mode is "link_up" in ledctl_mode2.
5194 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5195 for (i = 0; i < 3; i++) {
5196 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5197 if ((led & E1000_PHY_LED0_MODE_MASK) !=
5198 E1000_LEDCTL_MODE_LINK_UP)
5200 if (led & E1000_PHY_LED0_IVRT)
5201 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5203 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5207 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5211 * e1000_led_off_pchlan - Turn LEDs off
5212 * @hw: pointer to the HW structure
5214 * Turn off the LEDs.
5216 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5218 u16 data = (u16)hw->mac.ledctl_mode1;
5221 DEBUGFUNC("e1000_led_off_pchlan");
5223 /* If no link, then turn LED off by clearing the invert bit
5224 * for each LED that's mode is "link_up" in ledctl_mode1.
5226 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5227 for (i = 0; i < 3; i++) {
5228 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5229 if ((led & E1000_PHY_LED0_MODE_MASK) !=
5230 E1000_LEDCTL_MODE_LINK_UP)
5232 if (led & E1000_PHY_LED0_IVRT)
5233 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5235 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5239 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5243 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5244 * @hw: pointer to the HW structure
5246 * Read appropriate register for the config done bit for completion status
5247 * and configure the PHY through s/w for EEPROM-less parts.
5249 * NOTE: some silicon which is EEPROM-less will fail trying to read the
5250 * config done bit, so only an error is logged and continues. If we were
5251 * to return with error, EEPROM-less silicon would not be able to be reset
5254 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5256 s32 ret_val = E1000_SUCCESS;
5260 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5262 e1000_get_cfg_done_generic(hw);
5264 /* Wait for indication from h/w that it has completed basic config */
5265 if (hw->mac.type >= e1000_ich10lan) {
5266 e1000_lan_init_done_ich8lan(hw);
5268 ret_val = e1000_get_auto_rd_done_generic(hw);
5270 /* When auto config read does not complete, do not
5271 * return with an error. This can happen in situations
5272 * where there is no eeprom and prevents getting link.
5274 DEBUGOUT("Auto Read Done did not complete\n");
5275 ret_val = E1000_SUCCESS;
5279 /* Clear PHY Reset Asserted bit */
5280 status = E1000_READ_REG(hw, E1000_STATUS);
5281 if (status & E1000_STATUS_PHYRA)
5282 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5284 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5286 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5287 if (hw->mac.type <= e1000_ich9lan) {
5288 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5289 (hw->phy.type == e1000_phy_igp_3)) {
5290 e1000_phy_init_script_igp3(hw);
5293 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5294 /* Maybe we should do a basic PHY config */
5295 DEBUGOUT("EEPROM not present\n");
5296 ret_val = -E1000_ERR_CONFIG;
5304 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5305 * @hw: pointer to the HW structure
5307 * In the case of a PHY power down to save power, or to turn off link during a
5308 * driver unload, or wake on lan is not enabled, remove the link.
5310 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5312 /* If the management interface is not enabled, then power down */
5313 if (!(hw->mac.ops.check_mng_mode(hw) ||
5314 hw->phy.ops.check_reset_block(hw)))
5315 e1000_power_down_phy_copper(hw);
5321 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5322 * @hw: pointer to the HW structure
5324 * Clears hardware counters specific to the silicon family and calls
5325 * clear_hw_cntrs_generic to clear all general purpose counters.
5327 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5332 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5334 e1000_clear_hw_cntrs_base_generic(hw);
5336 E1000_READ_REG(hw, E1000_ALGNERRC);
5337 E1000_READ_REG(hw, E1000_RXERRC);
5338 E1000_READ_REG(hw, E1000_TNCRS);
5339 E1000_READ_REG(hw, E1000_CEXTERR);
5340 E1000_READ_REG(hw, E1000_TSCTC);
5341 E1000_READ_REG(hw, E1000_TSCTFC);
5343 E1000_READ_REG(hw, E1000_MGTPRC);
5344 E1000_READ_REG(hw, E1000_MGTPDC);
5345 E1000_READ_REG(hw, E1000_MGTPTC);
5347 E1000_READ_REG(hw, E1000_IAC);
5348 E1000_READ_REG(hw, E1000_ICRXOC);
5350 /* Clear PHY statistics registers */
5351 if ((hw->phy.type == e1000_phy_82578) ||
5352 (hw->phy.type == e1000_phy_82579) ||
5353 (hw->phy.type == e1000_phy_i217) ||
5354 (hw->phy.type == e1000_phy_82577)) {
5355 ret_val = hw->phy.ops.acquire(hw);
5358 ret_val = hw->phy.ops.set_page(hw,
5359 HV_STATS_PAGE << IGP_PAGE_SHIFT);
5362 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5363 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5364 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5365 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5366 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5367 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5368 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5369 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5370 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5371 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5372 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5373 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5374 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5375 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5377 hw->phy.ops.release(hw);