1 /*******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 /* 82562G 10/100 Network Connection
35 * 82562G-2 10/100 Network Connection
36 * 82562GT 10/100 Network Connection
37 * 82562GT-2 10/100 Network Connection
38 * 82562V 10/100 Network Connection
39 * 82562V-2 10/100 Network Connection
40 * 82566DC-2 Gigabit Network Connection
41 * 82566DC Gigabit Network Connection
42 * 82566DM-2 Gigabit Network Connection
43 * 82566DM Gigabit Network Connection
44 * 82566MC Gigabit Network Connection
45 * 82566MM Gigabit Network Connection
46 * 82567LM Gigabit Network Connection
47 * 82567LF Gigabit Network Connection
48 * 82567V Gigabit Network Connection
49 * 82567LM-2 Gigabit Network Connection
50 * 82567LF-2 Gigabit Network Connection
51 * 82567V-2 Gigabit Network Connection
52 * 82567LF-3 Gigabit Network Connection
53 * 82567LM-3 Gigabit Network Connection
54 * 82567LM-4 Gigabit Network Connection
55 * 82577LM Gigabit Network Connection
56 * 82577LC Gigabit Network Connection
57 * 82578DM Gigabit Network Connection
58 * 82578DC Gigabit Network Connection
59 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 */
63 #include "e1000_api.h"
65 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
66 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
67 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
68 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
69 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
70 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
71 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
72 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
73 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
74 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
75 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
76 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
77 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
78 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
79 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
81 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
83 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
84 u16 words, u16 *data);
85 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
86 u16 words, u16 *data);
87 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
88 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
89 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
91 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
92 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
93 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
94 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
95 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
96 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
97 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
98 u16 *speed, u16 *duplex);
99 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
100 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
101 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
102 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
103 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
104 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
105 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw);
106 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw);
107 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
108 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
109 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
110 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
111 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
112 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
113 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
114 u32 offset, u8 *data);
115 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
117 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
118 u32 offset, u16 *data);
119 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
120 u32 offset, u8 byte);
121 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
122 u32 offset, u8 data);
123 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
125 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
126 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
127 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
128 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
129 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
130 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
131 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
132 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	u16 flcdone:1; /* bit 0 Flash Cycle Done */
	u16 flcerr:1; /* bit 1 Flash Cycle Error */
	u16 dael:1; /* bit 2 Direct Access error Log */
	u16 berasesz:2; /* bit 4:3 Sector Erase Size */
	u16 flcinprog:1; /* bit 5 flash cycle in Progress */
	u16 reserved1:2; /* bit 7:6 Reserved */
	u16 reserved2:6; /* bit 13:8 Reserved */
	u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
	u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;   /* 0 Flash Cycle Go */
		u16 flcycle:2;   /* 2:1 Flash Cycle (read/write/erase) */
		u16 reserved:5;   /* 7:3 Reserved */
		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
		u16 flockdn:6;   /* 15:10 Reserved — NOTE(review): field named flockdn; confirm against HSFCTL spec */
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	u32 grra:8; /* 7:0 GbE region Read Access */
	u32 grwa:8; /* 15:8 GbE region Write Access */
	u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
	u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
/**
 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 * @hw: pointer to the HW structure
 *
 * Test access to the PHY registers by reading the PHY ID registers.  If
 * the PHY ID is already known (e.g. resume path) compare it with known ID,
 * otherwise assume the read PHY ID is correct if it is valid.
 *
 * Assumes the sw/fw/hw semaphore is already acquired.
 */
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
	/* Try at most twice: an all-ones (0xFFFF) register read means the
	 * PHY did not respond (e.g. interconnect still in SMBus mode).
	 */
	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
		/* PHY_ID1 supplies the upper 16 bits of the 32-bit id */
		phy_id = (u32)(phy_reg << 16);
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
		/* Keep the revision bits of PHY_ID2 out of the id value */
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
	/* Resume path: a previously stored id must match the one just read */
	if (hw->phy.id == phy_id)
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
	/*
	 * In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	hw->phy.ops.release(hw);
	ret_val = e1000_set_mdio_slow_mode_hv(hw);
		ret_val = e1000_get_phy_id(hw);
	hw->phy.ops.acquire(hw);
/**
 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 * @hw: pointer to the HW structure
 *
 * Workarounds/flow necessary for PHY initialization during driver load
 * and resume paths.
 */
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
	ret_val = hw->phy.ops.acquire(hw);
		DEBUGOUT("Failed to initialize PHY flow\n");
	/*
	 * The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579 before probing the PHY.
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(fwsm & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, true);
		if (e1000_phy_is_accessible_pchlan(hw)) {
		/* NOTE(review): pchlan with valid FW appears to skip the
		 * LANPHYPC toggle — confirm against the full flow.
		 */
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
		/* ME owning PHY reset prevents the required toggle */
		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
		DEBUGOUT("Toggling LANPHYPC\n");
		/* Set Phy Config Counter to 50msec */
		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
		mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
		mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
		/* Toggle LANPHYPC Value bit */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL);
		mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
		mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
		E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
		E1000_WRITE_FLUSH(hw);
		/* Release the override so HW drives LANPHYPC again */
		mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
		E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
		E1000_WRITE_FLUSH(hw);
	hw->phy.ops.release(hw);
	/*
	 * Reset the PHY before any access to it.  Doing so, ensures
	 * that the PHY is in a known good state before we read/write
	 * PHY registers.  The generic reset is sufficient here,
	 * because we haven't determined the PHY type yet.
	 */
	ret_val = e1000_phy_hw_reset_generic(hw);
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		e1000_gate_hw_phy_config_ich8lan(hw, false);
/**
 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 */
STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = E1000_SUCCESS;
	DEBUGFUNC("e1000_init_phy_params_pchlan");
	phy->reset_delay_us = 100;
	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	/* PCH parts use a single LPLU routine for both D0 and D3 states */
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	/* Run the PHY access workarounds before trying to identify the PHY */
	phy->id = e1000_phy_unknown;
	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
			ret_val = e1000_get_phy_id(hw);
			/* NOTE(review): id == PHY_REVISION_MASK is treated as
			 * an invalid (all-ones) read — confirm.
			 */
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
			/*
			 * In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			ret_val = e1000_get_phy_id(hw);
	phy->type = e1000_get_phy_type_from_id(phy->id);
	/* Bind type-specific polarity/cable-length/info handlers */
	case e1000_phy_82577:
	case e1000_phy_82579:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
	case e1000_phy_82578:
		/* 82578 uses M88-compatible PHY accessors */
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		/* Unrecognized PHY type */
		ret_val = -E1000_ERR_PHY;
/**
 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific PHY parameters and function pointers.
 */
STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
	struct e1000_phy_info *phy = &hw->phy;
	DEBUGFUNC("e1000_init_phy_params_ich8lan");
	phy->reset_delay_us = 100;
	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.read_reg = e1000_read_phy_reg_igp;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg = e1000_write_phy_reg_igp;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
		/* IGP probe failed: retry with BM register accessors */
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
	/* Keep re-reading the id while it still maps to e1000_phy_unknown */
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
		ret_val = e1000_get_phy_id(hw);
	/* Bind type-specific handlers based on the identified PHY id */
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
	case IFE_PLUS_E_PHY_ID:
		/* IFE PHYs advertise 10/100 only (E1000_ALL_NOT_GIG) */
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		phy->ops.get_info = e1000_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		/* Unknown PHY id */
		return -E1000_ERR_PHY;
	return E1000_SUCCESS;
/**
 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific NVM parameters and function
 * pointers.
 */
STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
	/* Can't read flash registers if the register set isn't mapped. */
	if (!hw->flash_address) {
		DEBUGOUT("ERROR: Flash registers not mapped\n");
		return -E1000_ERR_CONFIG;
	nvm->type = e1000_nvm_flash_sw;
	/* GFPREG holds the GbE flash region base/limit sector numbers */
	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
	/* sector_X_addr is a "sector"-aligned address (4096 bytes)
	 * Add 1 to sector_end_addr since this sector is included in
	 * the overall size.
	 */
	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
	/* flash_base_addr is byte-aligned */
	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
	/* find total size of the NVM, then cut in half since the total
	 * size represents two separate NVM banks.
	 */
	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
			       << FLASH_SECTOR_ADDR_SHIFT;
	nvm->flash_bank_size /= 2;
	/* Adjust to word count */
	nvm->flash_bank_size /= sizeof(u16);
	nvm->word_size = E1000_SHADOW_RAM_WORDS;
	/* Clear shadow ram: all words unmodified, erased value 0xFFFF */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
	/* Function Pointers */
	nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
	nvm->ops.release = e1000_release_nvm_ich8lan;
	nvm->ops.read = e1000_read_nvm_ich8lan;
	nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write = e1000_write_nvm_ich8lan;
	return E1000_SUCCESS;
/**
 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific MAC parameters and function
 * pointers.
 */
STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
	struct e1000_mac_info *mac = &hw->mac;
	DEBUGFUNC("e1000_init_mac_params_ich8lan");
	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;
	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	mac->has_fwsm = true;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = false;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;
	/* Function pointers */
	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
	/* LED and other operations */
	/* check management mode */
	mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
	mac->ops.id_led_init = e1000_id_led_init_generic;
	mac->ops.blink_led = e1000_blink_led_generic;
	mac->ops.setup_led = e1000_setup_led_generic;
	mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
	/* turn on/off LED */
	mac->ops.led_on = e1000_led_on_ich8lan;
	mac->ops.led_off = e1000_led_off_ich8lan;
	/* NOTE(review): the following overrides are presumably selected by a
	 * mac->type switch (PCH2 vs PCH) — confirm branch placement.
	 */
	mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
	mac->ops.rar_set = e1000_rar_set_pch2lan;
	/* check management mode */
	mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
	mac->ops.id_led_init = e1000_id_led_init_pchlan;
	mac->ops.setup_led = e1000_setup_led_pchlan;
	mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
	/* turn on/off LED */
	mac->ops.led_on = e1000_led_on_pchlan;
	mac->ops.led_off = e1000_led_off_pchlan;
	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
	/* Gate automatic PHY configuration by hardware on managed 82579 */
	if ((mac->type == e1000_pch2lan) &&
	    (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
		e1000_gate_hw_phy_config_ich8lan(hw, true);
	return E1000_SUCCESS;
/**
 * __e1000_access_emi_reg_locked - Read/write EMI register
 * @hw: pointer to the HW structure
 * @address: EMI address to program
 * @data: pointer to value to read/write from/to the EMI address
 * @read: boolean flag to indicate read or write
 *
 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
 */
STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
	s32 ret_val = E1000_SUCCESS;
	DEBUGFUNC("__e1000_access_emi_reg_locked");
	/* Program the EMI address register, then access the data register */
	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
708 * e1000_read_emi_reg_locked - Read Extended Management Interface register
709 * @hw: pointer to the HW structure
710 * @addr: EMI address to program
711 * @data: value to be read from the EMI address
713 * Assumes the SW/FW/HW Semaphore is already acquired.
715 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
717 DEBUGFUNC("e1000_read_emi_reg_locked");
719 return __e1000_access_emi_reg_locked(hw, addr, data, true);
723 * e1000_write_emi_reg_locked - Write Extended Management Interface register
724 * @hw: pointer to the HW structure
725 * @addr: EMI address to program
726 * @data: value to be written to the EMI address
728 * Assumes the SW/FW/HW Semaphore is already acquired.
730 STATIC s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
732 DEBUGFUNC("e1000_read_emi_reg_locked");
734 return __e1000_access_emi_reg_locked(hw, addr, &data, false);
/**
 * e1000_set_eee_pchlan - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
 * the link and the EEE capabilities of the link partner.  The LPI Control
 * register bits will remain set only if/when link is up.
 */
STATIC s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	DEBUGFUNC("e1000_set_eee_pchlan");
	/* EEE is only handled for 82579 and I217 PHYs */
	if ((hw->phy.type != e1000_phy_82579) &&
	    (hw->phy.type != e1000_phy_i217))
		return E1000_SUCCESS;
	ret_val = hw->phy.ops.acquire(hw);
	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		u16 lpa, pcs_status, data;
		/* Save off link partner's EEE ability */
		switch (hw->phy.type) {
		case e1000_phy_82579:
			lpa = I82579_EEE_LP_ABILITY;
			pcs_status = I82579_EEE_PCS_STATUS;
			/* I217 uses its own EMI register offsets */
			lpa = I217_EEE_LP_ABILITY;
			pcs_status = I217_EEE_PCS_STATUS;
			ret_val = -E1000_ERR_PHY;
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		/*
		 * Enable EEE only for speeds in which the link partner is
		 * EEE capable.
		 */
		if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
		if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			/*
			 * EEE is not supported in 100Half, so ignore
			 * partner's EEE in 100 ability if full-duplex
			 * is not advertised.
			 */
			dev_spec->eee_lp_ability &=
				~I82579_EEE_100_SUPPORTED;
		/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
		ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
	hw->phy.ops.release(hw);
/**
 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see of the link status of the hardware has changed.  If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 */
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
	struct e1000_mac_info *mac = &hw->mac;
	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return E1000_SUCCESS;
	/*
	 * First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		/* Clear link partner's EEE ability */
		hw->dev_spec.ich8lan.eee_lp_ability = 0;
		return E1000_SUCCESS; /* No link detected */
	mac->get_link_status = false;
	/* Apply MAC-type specific link-up workarounds */
	switch (hw->mac.type) {
		ret_val = e1000_k1_workaround_lv(hw);
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
		/* Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000_check_downshift_generic(hw);
	/* Enable/Disable EEE after link up */
	ret_val = e1000_set_eee_pchlan(hw);
	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
		return -E1000_ERR_CONFIG;
	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);
	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000_config_fc_after_link_up_generic(hw);
		DEBUGOUT("Error configuring flow control\n");
/**
 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
 * @hw: pointer to the HW structure
 *
 * Initialize family-specific function pointers for PHY, MAC, and NVM.
 */
void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
	/* PHY init differs between ICH-family and PCH-family MAC types */
	switch (hw->mac.type) {
		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
966 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
967 * @hw: pointer to the HW structure
969 * Acquires the mutex for performing NVM operations.
971 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
973 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
975 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
977 return E1000_SUCCESS;
981 * e1000_release_nvm_ich8lan - Release NVM mutex
982 * @hw: pointer to the HW structure
984 * Releases the mutex used while performing NVM operations.
986 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
988 DEBUGFUNC("e1000_release_nvm_ich8lan");
990 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
/**
 * e1000_acquire_swflag_ich8lan - Acquire software control flag
 * @hw: pointer to the HW structure
 *
 * Acquires the software control flag for performing PHY and select
 * MAC CSR accesses.
 */
STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = E1000_SUCCESS;
	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
	/* First wait for any previously set SW flag to clear */
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
	/* Timed out: software still holds the resource */
		DEBUGOUT("SW has already locked the resource.\n");
		ret_val = -E1000_ERR_CONFIG;
	/* Request ownership, then wait for HW/FW to reflect the grant */
	timeout = SW_FLAG_TIMEOUT;
	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
	/* Grant failed: back out the request so FW/HW are not blocked */
		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
		ret_val = -E1000_ERR_CONFIG;
	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1057 * e1000_release_swflag_ich8lan - Release software control flag
1058 * @hw: pointer to the HW structure
1060 * Releases the software control flag for performing PHY and select
1063 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1067 DEBUGFUNC("e1000_release_swflag_ich8lan");
1069 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1071 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1072 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1073 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1075 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1078 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1084 * e1000_check_mng_mode_ich8lan - Checks management mode
1085 * @hw: pointer to the HW structure
1087 * This checks if the adapter has any manageability enabled.
1088 * This is a function pointer entry point only called by read/write
1089 * routines for the PHY and NVM parts.
1091 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1095 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1097 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1099 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1100 ((fwsm & E1000_FWSM_MODE_MASK) ==
1101 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
/* NOTE(review): fused line numbers, interior lines elided; code unchanged.
 * Unlike the ICH8 variant above, this tests only the IAMT mode BIT, not
 * equality of the whole mode field. */
1105 * e1000_check_mng_mode_pchlan - Checks management mode
1106 * @hw: pointer to the HW structure
1108 * This checks if the adapter has iAMT enabled.
1109 * This is a function pointer entry point only called by read/write
1110 * routines for the PHY and NVM parts.
1112 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1116 DEBUGFUNC("e1000_check_mng_mode_pchlan");
1118 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1120 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1121 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
/* NOTE(review): fused line numbers, interior lines elided (e.g. the branch
 * selecting RAR vs SHRA and ret_val declaration are missing); code kept
 * byte-identical.  Visible logic: pack the 6-byte MAC into RAL/RAH little
 * endian, set the Address Valid bit only for a non-zero address, write the
 * registers with flushes, and for SHRA writes take the swflag, write
 * SHRAL/SHRAH[index-1], then read back to verify (ME may lock SHRA). */
1125 * e1000_rar_set_pch2lan - Set receive address register
1126 * @hw: pointer to the HW structure
1127 * @addr: pointer to the receive address
1128 * @index: receive address array register
1130 * Sets the receive address array register at index to the address passed
1131 * in by addr. For 82579, RAR[0] is the base address register that is to
1132 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1133 * Use SHRA[0-3] in place of those reserved for ME.
1135 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1137 u32 rar_low, rar_high;
1139 DEBUGFUNC("e1000_rar_set_pch2lan");
1141 /* HW expects these in little endian so we reverse the byte order
1142 * from network order (big endian) to little endian
1144 rar_low = ((u32) addr[0] |
1145 ((u32) addr[1] << 8) |
1146 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1148 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1150 /* If MAC address zero, no need to set the AV bit */
1151 if (rar_low || rar_high)
1152 rar_high |= E1000_RAH_AV;
1155 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1156 E1000_WRITE_FLUSH(hw);
1157 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1158 E1000_WRITE_FLUSH(hw);
1162 if (index < hw->mac.rar_entry_count) {
/* SHRA path: serialize against FW/HW via the software flag. */
1165 ret_val = e1000_acquire_swflag_ich8lan(hw);
1169 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1170 E1000_WRITE_FLUSH(hw);
1171 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1172 E1000_WRITE_FLUSH(hw);
1174 e1000_release_swflag_ich8lan(hw);
1176 /* verify the register updates */
1177 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1178 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1181 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1182 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1186 DEBUGOUT1("Failed to write receive address at index %d\n", index);
/* NOTE(review): fused line numbers, interior lines elided; code unchanged.
 * Returns E1000_SUCCESS when FWSM's RSPCIPHY bit says PHY reset is
 * permitted, otherwise E1000_BLK_PHY_RESET. */
1190 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1191 * @hw: pointer to the HW structure
1193 * Checks if firmware is blocking the reset of the PHY.
1194 * This is a function pointer entry point only called by
1197 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1201 DEBUGFUNC("e1000_check_reset_block_ich8lan");
1203 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1205 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1206 : E1000_BLK_PHY_RESET;
/* NOTE(review): fused line numbers, interior lines elided; code unchanged.
 * Visible logic: read the SMBus address and frequency from the STRAP
 * register, merge the address (plus PEC-enable and valid bits) into
 * HV_SMB_ADDR, and for i217 PHYs also restore the two SMBus frequency
 * bits.  Caller must already hold the PHY semaphore (uses _locked ops). */
1210 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1211 * @hw: pointer to the HW structure
1213 * Assumes semaphore already acquired.
1216 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1219 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1220 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1221 E1000_STRAP_SMT_FREQ_SHIFT;
1222 s32 ret_val = E1000_SUCCESS;
1224 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1226 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1230 phy_data &= ~HV_SMB_ADDR_MASK;
1231 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1232 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1234 if (hw->phy.type == e1000_phy_i217) {
1235 /* Restore SMBus frequency */
1237 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
/* Low and high frequency bits are re-inserted separately. */
1238 phy_data |= (freq & (1 << 0)) <<
1239 HV_SMB_ADDR_FREQ_LOW_SHIFT;
1240 phy_data |= (freq & (1 << 1)) <<
1241 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1243 DEBUGOUT("Unsupported SMB frequency in PHY\n");
1247 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
/* NOTE(review): fused line numbers; many interior lines elided (switch case
 * labels, early returns, error paths).  Code kept byte-identical.  Visible
 * logic: decide the SW-config mask per device, acquire the PHY semaphore,
 * and if FEXTNVM says SW must configure the LCD, walk the NVM extended
 * configuration region word-pairs (address/data) and write them to the PHY,
 * tracking the current PHY page; optionally program the SMBus address and
 * LED configuration first when HW will not do it. */
1251 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1252 * @hw: pointer to the HW structure
1254 * SW should configure the LCD from the NVM extended configuration region
1255 * as a workaround for certain parts.
1257 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1259 struct e1000_phy_info *phy = &hw->phy;
1260 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1261 s32 ret_val = E1000_SUCCESS;
1262 u16 word_addr, reg_data, reg_addr, phy_page = 0;
1264 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1266 /* Initialize the PHY from the NVM on ICH platforms. This
1267 * is needed due to an issue where the NVM configuration is
1268 * not properly autoloaded after power transitions.
1269 * Therefore, after each PHY reset, we will load the
1270 * configuration data out of the NVM manually.
1272 switch (hw->mac.type) {
1274 if (phy->type != e1000_phy_igp_3)
1277 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1278 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1279 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1285 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1291 ret_val = hw->phy.ops.acquire(hw);
1295 data = E1000_READ_REG(hw, E1000_FEXTNVM);
1296 if (!(data & sw_cfg_mask))
1299 /* Make sure HW does not configure LCD from PHY
1300 * extended configuration before SW configuration
1302 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1303 if ((hw->mac.type < e1000_pch2lan) &&
1304 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1307 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1308 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1309 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1313 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1314 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1316 if (((hw->mac.type == e1000_pchlan) &&
1317 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1318 (hw->mac.type > e1000_pchlan)) {
1319 /* HW configures the SMBus address and LEDs when the
1320 * OEM and LCD Write Enable bits are set in the NVM.
1321 * When both NVM bits are cleared, SW will configure
1324 ret_val = e1000_write_smbus_addr(hw);
1328 data = E1000_READ_REG(hw, E1000_LEDCTL);
1329 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1335 /* Configure LCD from extended configuration region. */
1337 /* cnf_base_addr is in DWORD */
1338 word_addr = (u16)(cnf_base_addr << 1);
/* Each entry is a (data, address) word pair in the NVM. */
1340 for (i = 0; i < cnf_size; i++) {
1341 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1346 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1351 /* Save off the PHY page for future writes. */
1352 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1353 phy_page = reg_data;
1357 reg_addr &= PHY_REG_MASK;
1358 reg_addr |= phy_page;
1360 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1367 hw->phy.ops.release(hw);
/* NOTE(review): fused line numbers, interior lines elided (k1_enable
 * updates between the status checks and the write are missing); code kept
 * byte-identical.  Visible logic: pchlan-only; under the PHY semaphore,
 * read the PHY-specific status register (82578 BM_CS_STATUS or 82577
 * HV_M_STATUS), detect a resolved 1Gbps link, write PHY_REG(770,19) for
 * the link-up or link-down stall fix, then apply the K1 setting. */
1372 * e1000_k1_gig_workaround_hv - K1 Si workaround
1373 * @hw: pointer to the HW structure
1374 * @link: link up bool flag
1376 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1377 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
1378 * If link is down, the function will restore the default K1 setting located
1381 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1383 s32 ret_val = E1000_SUCCESS;
1385 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1387 DEBUGFUNC("e1000_k1_gig_workaround_hv");
1389 if (hw->mac.type != e1000_pchlan)
1390 return E1000_SUCCESS;
1392 /* Wrap the whole flow with the sw flag */
1393 ret_val = hw->phy.ops.acquire(hw);
1397 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1399 if (hw->phy.type == e1000_phy_82578) {
1400 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1405 status_reg &= BM_CS_STATUS_LINK_UP |
1406 BM_CS_STATUS_RESOLVED |
1407 BM_CS_STATUS_SPEED_MASK;
1409 if (status_reg == (BM_CS_STATUS_LINK_UP |
1410 BM_CS_STATUS_RESOLVED |
1411 BM_CS_STATUS_SPEED_1000))
1415 if (hw->phy.type == e1000_phy_82577) {
1416 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1421 status_reg &= HV_M_STATUS_LINK_UP |
1422 HV_M_STATUS_AUTONEG_COMPLETE |
1423 HV_M_STATUS_SPEED_MASK;
1425 if (status_reg == (HV_M_STATUS_LINK_UP |
1426 HV_M_STATUS_AUTONEG_COMPLETE |
1427 HV_M_STATUS_SPEED_1000))
1431 /* Link stall fix for link up */
1432 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1438 /* Link stall fix for link down */
1439 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1445 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1448 hw->phy.ops.release(hw);
/* NOTE(review): fused line numbers, interior lines elided (local decls,
 * the if/else around the K1 enable bit, delays); code kept byte-identical.
 * Visible logic: set or clear the K1 enable bit in the KMRN K1 config
 * register, then briefly force speed with SPD_BYPS while K1 settles and
 * restore the original CTRL/CTRL_EXT values. */
1454 * e1000_configure_k1_ich8lan - Configure K1 power state
1455 * @hw: pointer to the HW structure
1456 * @enable: K1 state to configure
1458 * Configure the K1 power state based on the provided parameter.
1459 * Assumes semaphore already acquired.
1461 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1463 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1465 s32 ret_val = E1000_SUCCESS;
1471 DEBUGFUNC("e1000_configure_k1_ich8lan");
1473 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1479 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1481 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1483 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
/* Save CTRL/CTRL_EXT so they can be restored after the forced-speed
 * window below. */
1489 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1490 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1492 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1493 reg |= E1000_CTRL_FRCSPD;
1494 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1496 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1497 E1000_WRITE_FLUSH(hw);
1499 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1500 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1501 E1000_WRITE_FLUSH(hw);
1504 return E1000_SUCCESS;
/* NOTE(review): fused line numbers, interior lines elided (local decls,
 * the d0_state if/else structure, release/goto labels); code kept
 * byte-identical.  Visible logic: pchlan+ only; under the PHY semaphore,
 * mirror the MAC's PHY_CTRL GbE-disable and LPLU bits into the PHY's
 * HV_OEM_BITS (D0 vs non-D0 bit sets differ), and restart autoneg to
 * latch them when reset is not blocked. */
1508 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1509 * @hw: pointer to the HW structure
1510 * @d0_state: boolean if entering d0 or d3 device state
1512 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1513 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1514 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1516 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1522 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1524 if (hw->mac.type < e1000_pchlan)
1527 ret_val = hw->phy.ops.acquire(hw);
1531 if (hw->mac.type == e1000_pchlan) {
1532 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1533 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1537 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1538 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1541 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1543 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1547 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1550 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1551 oem_reg |= HV_OEM_BITS_GBE_DIS;
1553 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1554 oem_reg |= HV_OEM_BITS_LPLU;
1556 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1557 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1558 oem_reg |= HV_OEM_BITS_GBE_DIS;
1560 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1561 E1000_PHY_CTRL_NOND0A_LPLU))
1562 oem_reg |= HV_OEM_BITS_LPLU;
1565 /* Set Restart auto-neg to activate the bits */
1566 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1567 !hw->phy.ops.check_reset_block(hw))
1568 oem_reg |= HV_OEM_BITS_RESTART_AN;
1570 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1573 hw->phy.ops.release(hw);
/* NOTE(review): fused line numbers, interior lines elided; code unchanged.
 * Read-modify-write of HV_KMRN_MODE_CTRL to set the MDIO slow-mode bit. */
1580 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1581 * @hw: pointer to the HW structure
1583 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1588 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
1590 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1594 data |= HV_KMRN_MDIO_SLOW;
1596 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
/* NOTE(review): fused line numbers, interior lines elided (error-check
 * returns after most calls are missing); code kept byte-identical.
 * Visible sequence for pchlan: MDIO slow mode (82577), early-preamble
 * disable + SSC preamble tuning for specific PHY revisions, 82578 rev<2
 * soft reset + control write, page-select reset, K1 gig workaround,
 * BM_PORT_GEN_CFG update for busy-hub half-duplex disconnects, and a
 * raised MSE threshold via the EMI register. */
1602 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1603 * done after every PHY reset.
1605 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1607 s32 ret_val = E1000_SUCCESS;
1610 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
1612 if (hw->mac.type != e1000_pchlan)
1613 return E1000_SUCCESS;
1615 /* Set MDIO slow mode before any other MDIO access */
1616 if (hw->phy.type == e1000_phy_82577) {
1617 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1622 if (((hw->phy.type == e1000_phy_82577) &&
1623 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1624 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1625 /* Disable generation of early preamble */
1626 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1630 /* Preamble tuning for SSC */
1631 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
1637 if (hw->phy.type == e1000_phy_82578) {
1638 /* Return registers to default by doing a soft reset then
1639 * writing 0x3140 to the control register.
1641 if (hw->phy.revision < 2) {
1642 e1000_phy_sw_reset_generic(hw);
1643 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1649 ret_val = hw->phy.ops.acquire(hw);
/* Reset the PHY page select back to page 0. */
1654 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1655 hw->phy.ops.release(hw);
1659 /* Configure the K1 Si workaround during phy reset assuming there is
1660 * link so that it disables K1 if link is in 1Gbps.
1662 ret_val = e1000_k1_gig_workaround_hv(hw, true);
1666 /* Workaround for link disconnects on a busy hub in half duplex */
1667 ret_val = hw->phy.ops.acquire(hw);
1670 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1673 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
1678 /* set MSE higher to enable link to stay up when noise is high */
1679 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
1681 hw->phy.ops.release(hw);
/* NOTE(review): fused line numbers, interior lines elided; code unchanged.
 * Under the PHY semaphore and with BM wakeup-register access enabled,
 * copies each MAC RAL/RAH entry (rar_entry_count + 4 SHRA entries) into
 * the corresponding PHY BM_RAR_L/M/H/CTRL wakeup registers. */
1687 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1688 * @hw: pointer to the HW structure
1690 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1696 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1698 ret_val = hw->phy.ops.acquire(hw);
1701 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1705 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1706 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1707 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1708 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1709 (u16)(mac_reg & 0xFFFF));
1710 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1711 (u16)((mac_reg >> 16) & 0xFFFF));
1713 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1714 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1715 (u16)(mac_reg & 0xFFFF));
1716 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1717 (u16)((mac_reg & E1000_RAH_AV)
1721 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1724 hw->phy.ops.release(hw);
/* NOTE(review): fused line numbers; interior lines elided (CRC seed, the
 * per-byte XOR into crc, and the final return are missing).  Code kept
 * byte-identical.  Visible logic: bitwise (LSB-first, reflected) CRC-32
 * over the 6 MAC-address bytes using the 802.3 polynomial 0xEDB88320;
 * `mask` is all-ones when the low CRC bit is set. */
1727 static u32 e1000_calc_rx_da_crc(u8 mac[])
1729 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
1730 u32 i, j, mask, crc;
1732 DEBUGFUNC("e1000_calc_rx_da_crc");
1735 for (i = 0; i < 6; i++) {
1737 for (j = 8; j > 0; j--) {
1738 mask = (crc & 1) * (-1);
1739 crc = (crc >> 1) ^ (poly & mask);
/* NOTE(review): fused line numbers; the enable/disable if/else framing,
 * several register constants and error checks are elided.  Code kept
 * byte-identical.  Visible logic for pch2lan: gate the PHY Rx path via
 * PHY_REG(769,20) bit 14, then either (enable) program per-RAR CRC seeds
 * into PCH_RAICC, copy Rx addresses to the PHY, strip Ethernet CRC
 * (RCTL_SECRC), and retune FFLT_DBG/KMRN/PHY jumbo settings, or
 * (disable) restore all of those MAC/KMRN/PHY registers to defaults;
 * finally re-enable the Rx path. */
1746 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1748 * @hw: pointer to the HW structure
1749 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1751 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1753 s32 ret_val = E1000_SUCCESS;
1758 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
1760 if (hw->mac.type != e1000_pch2lan)
1761 return E1000_SUCCESS;
1763 /* disable Rx path while enabling/disabling workaround */
1764 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1765 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
1766 phy_reg | (1 << 14));
1771 /* Write Rx addresses (rar_entry_count for RAL/H, and
1772 * SHRAL/H) and initial CRC values to the MAC
1774 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1775 u8 mac_addr[ETH_ADDR_LEN] = {0};
1776 u32 addr_high, addr_low;
1778 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
/* Skip RAR entries whose Address Valid bit is clear. */
1779 if (!(addr_high & E1000_RAH_AV))
1781 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
1782 mac_addr[0] = (addr_low & 0xFF);
1783 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1784 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1785 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1786 mac_addr[4] = (addr_high & 0xFF);
1787 mac_addr[5] = ((addr_high >> 8) & 0xFF);
1789 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1790 e1000_calc_rx_da_crc(mac_addr));
1793 /* Write Rx addresses to the PHY */
1794 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1796 /* Enable jumbo frame workaround in the MAC */
1797 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1798 mac_reg &= ~(1 << 14);
1799 mac_reg |= (7 << 15);
1800 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1802 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1803 mac_reg |= E1000_RCTL_SECRC;
1804 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1806 ret_val = e1000_read_kmrn_reg_generic(hw,
1807 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1811 ret_val = e1000_write_kmrn_reg_generic(hw,
1812 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1816 ret_val = e1000_read_kmrn_reg_generic(hw,
1817 E1000_KMRNCTRLSTA_HD_CTRL,
1821 data &= ~(0xF << 8);
1823 ret_val = e1000_write_kmrn_reg_generic(hw,
1824 E1000_KMRNCTRLSTA_HD_CTRL,
1829 /* Enable jumbo frame workaround in the PHY */
1830 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1831 data &= ~(0x7F << 5);
1832 data |= (0x37 << 5);
1833 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1836 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1838 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1841 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1842 data &= ~(0x3FF << 2);
1843 data |= (0x1A << 2);
1844 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1847 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
1850 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1851 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
1856 /* Write MAC register values back to h/w defaults */
1857 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1858 mac_reg &= ~(0xF << 14);
1859 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1861 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1862 mac_reg &= ~E1000_RCTL_SECRC;
1863 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1865 ret_val = e1000_read_kmrn_reg_generic(hw,
1866 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1870 ret_val = e1000_write_kmrn_reg_generic(hw,
1871 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1875 ret_val = e1000_read_kmrn_reg_generic(hw,
1876 E1000_KMRNCTRLSTA_HD_CTRL,
1880 data &= ~(0xF << 8);
1882 ret_val = e1000_write_kmrn_reg_generic(hw,
1883 E1000_KMRNCTRLSTA_HD_CTRL,
1888 /* Write PHY register values back to h/w defaults */
1889 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1890 data &= ~(0x7F << 5);
1891 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1894 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1896 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1899 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1900 data &= ~(0x3FF << 2);
1902 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1905 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1908 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1909 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
1915 /* re-enable Rx path after enabling/disabling workaround */
1916 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
/* NOTE(review): fused line numbers, interior lines elided; code unchanged.
 * pch2lan-only post-reset PHY workarounds: MDIO slow mode, then under the
 * PHY semaphore raise the MSE threshold and set MSE link-down count. */
1921 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1922 * done after every PHY reset.
1924 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1926 s32 ret_val = E1000_SUCCESS;
1928 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
1930 if (hw->mac.type != e1000_pch2lan)
1931 return E1000_SUCCESS;
1933 /* Set MDIO slow mode before any other MDIO access */
1934 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1938 ret_val = hw->phy.ops.acquire(hw);
1941 /* set MSE higher to enable link to stay up when noise is high */
1942 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
1945 /* drop link after 5 times MSE threshold was reached */
1946 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
1948 hw->phy.ops.release(hw);
/* NOTE(review): fused line numbers, interior lines elided (local decls,
 * the else branch framing); code kept byte-identical.  Visible logic for
 * 82579 (pch2lan): once link is up and autoneg complete, pick the K1
 * beacon duration (8us at 1Gbps, else 16us) in FEXTNVM4, adjust the
 * I82579_LPI_CTRL PLL-lock-count bit, and at 1Gbps clear the
 * PLL-stop-in-K1 bit in HV_PM_CTRL (1G packet-drop workaround). */
1954 * e1000_k1_gig_workaround_lv - K1 Si workaround
1955 * @hw: pointer to the HW structure
1957 * Workaround to set the K1 beacon duration for 82579 parts
1959 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1961 s32 ret_val = E1000_SUCCESS;
1966 DEBUGFUNC("e1000_k1_workaround_lv");
1968 if (hw->mac.type != e1000_pch2lan)
1969 return E1000_SUCCESS;
1971 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1972 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
1976 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1977 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1978 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1979 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1981 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
1985 if (status_reg & HV_M_STATUS_SPEED_1000) {
1988 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1989 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1990 /* LV 1G Packet drop issue wa */
1991 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
1995 pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
1996 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2001 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2002 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2004 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2005 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
/* NOTE(review): fused line numbers, interior lines elided; code unchanged.
 * pch2lan-only: read-modify-write of EXTCNF_CTRL's GATE_PHY_CFG bit. */
2012 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2013 * @hw: pointer to the HW structure
2014 * @gate: boolean set to true to gate, false to ungate
2016 * Gate/ungate the automatic PHY configuration via hardware; perform
2017 * the configuration via software instead.
2019 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2023 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2025 if (hw->mac.type != e1000_pch2lan)
2028 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2031 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2033 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2035 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
/* NOTE(review): fused line numbers, interior lines elided (do-keyword and
 * delay inside the poll loop are missing); code kept byte-identical.
 * Polls STATUS.LAN_INIT_DONE up to E1000_ICH8_LAN_INIT_TIMEOUT iterations,
 * warns on timeout, then clears the bit for the next init event. */
2039 * e1000_lan_init_done_ich8lan - Check for PHY config completion
2040 * @hw: pointer to the HW structure
2042 * Check the appropriate indication the MAC has finished configuring the
2043 * PHY after a software reset.
2045 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2047 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2049 DEBUGFUNC("e1000_lan_init_done_ich8lan");
2051 /* Wait for basic configuration completes before proceeding */
2053 data = E1000_READ_REG(hw, E1000_STATUS);
2054 data &= E1000_STATUS_LAN_INIT_DONE;
2056 } while ((!data) && --loop);
2058 /* If basic configuration is incomplete before the above loop
2059 * count reaches 0, loading the configuration from NVM will
2060 * leave the PHY in a bad state possibly resulting in no link.
2063 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2065 /* Clear the Init Done bit for the next init event */
2066 data = E1000_READ_REG(hw, E1000_STATUS);
2067 data &= ~E1000_STATUS_LAN_INIT_DONE;
2068 E1000_WRITE_REG(hw, E1000_STATUS, data);
/* NOTE(review): fused line numbers, interior lines elided (switch case
 * labels, error checks); code kept byte-identical.  Visible sequence:
 * skip if PHY reset is blocked; run MAC-type-specific workarounds; clear
 * the host wakeup bit; run SW LCD config and OEM-bits config; on 82579,
 * ungate auto PHY config when no valid FW and set the EEE LPI update
 * timer via the EMI register.
 * BUG(encoding): the `®` on the BM_PORT_GEN_CFG read line below is a
 * mis-encoded `&reg` (UTF-8/Latin-1 mojibake) -- must be restored before
 * this file can compile. */
2072 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2073 * @hw: pointer to the HW structure
2075 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2077 s32 ret_val = E1000_SUCCESS;
2080 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2082 if (hw->phy.ops.check_reset_block(hw))
2083 return E1000_SUCCESS;
2085 /* Allow time for h/w to get to quiescent state after reset */
2088 /* Perform any necessary post-reset workarounds */
2089 switch (hw->mac.type) {
2091 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2096 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2104 /* Clear the host wakeup bit after lcd reset */
2105 if (hw->mac.type >= e1000_pchlan) {
2106 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, ®);
2107 reg &= ~BM_WUC_HOST_WU_BIT;
2108 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2111 /* Configure the LCD with the extended configuration region in NVM */
2112 ret_val = e1000_sw_lcd_config_ich8lan(hw);
2116 /* Configure the LCD with the OEM bits in NVM */
2117 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2119 if (hw->mac.type == e1000_pch2lan) {
2120 /* Ungate automatic PHY configuration on non-managed 82579 */
2121 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2122 E1000_ICH_FWSM_FW_VALID)) {
2124 e1000_gate_hw_phy_config_ich8lan(hw, false);
2127 /* Set EEE LPI Update Timer to 200usec */
2128 ret_val = hw->phy.ops.acquire(hw);
2131 ret_val = e1000_write_emi_reg_locked(hw,
2132 I82579_LPI_UPDATE_TIMER,
2134 hw->phy.ops.release(hw);
/* NOTE(review): fused line numbers, interior lines elided; code unchanged.
 * Gates auto PHY config on non-managed 82579, performs the generic PHY
 * hardware reset, then runs the post-reset sequence. */
2141 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2142 * @hw: pointer to the HW structure
2145 * This is a function pointer entry point called by drivers
2146 * or other shared routines.
2148 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2150 s32 ret_val = E1000_SUCCESS;
2152 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2154 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2155 if ((hw->mac.type == e1000_pch2lan) &&
2156 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2157 e1000_gate_hw_phy_config_ich8lan(hw, true);
2159 ret_val = e1000_phy_hw_reset_generic(hw);
2163 return e1000_post_phy_reset_ich8lan(hw);
/* NOTE(review): fused line numbers, interior lines elided; code unchanged.
 * Read HV_OEM_BITS, set/clear the LPLU bit per `active`, add restart-AN
 * when reset is not blocked, write back. */
2167 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2168 * @hw: pointer to the HW structure
2169 * @active: true to enable LPLU, false to disable
2171 * Sets the LPLU state according to the active flag. For PCH, if OEM write
2172 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2173 * the phy speed. This function will manually set the LPLU bit and restart
2174 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
2175 * since it configures the same bit.
2177 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2179 s32 ret_val = E1000_SUCCESS;
2182 DEBUGFUNC("e1000_set_lplu_state_pchlan");
2184 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2189 oem_reg |= HV_OEM_BITS_LPLU;
2191 oem_reg &= ~HV_OEM_BITS_LPLU;
2193 if (!hw->phy.ops.check_reset_block(hw))
2194 oem_reg |= HV_OEM_BITS_RESTART_AN;
2196 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
/* NOTE(review): fused line numbers, interior lines elided (the active
 * if/else framing and several error checks are missing); code kept
 * byte-identical.  Visible logic: no-op for IFE PHYs; when activating,
 * set D0A_LPLU in PHY_CTRL, run the ICH8 gig downshift workaround, and
 * disable SmartSpeed (mutually exclusive with LPLU); when deactivating,
 * clear the bit and restore SmartSpeed per phy->smart_speed. */
2200 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2201 * @hw: pointer to the HW structure
2202 * @active: true to enable LPLU, false to disable
2204 * Sets the LPLU D0 state according to the active flag. When
2205 * activating LPLU this function also disables smart speed
2206 * and vice versa. LPLU will not be activated unless the
2207 * device autonegotiation advertisement meets standards of
2208 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2209 * This is a function pointer entry point only called by
2210 * PHY setup routines.
2212 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2214 struct e1000_phy_info *phy = &hw->phy;
2216 s32 ret_val = E1000_SUCCESS;
2219 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
2221 if (phy->type == e1000_phy_ife)
2222 return E1000_SUCCESS;
2224 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2227 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2228 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2230 if (phy->type != e1000_phy_igp_3)
2231 return E1000_SUCCESS;
2233 /* Call gig speed drop workaround on LPLU before accessing
2236 if (hw->mac.type == e1000_ich8lan)
2237 e1000_gig_downshift_workaround_ich8lan(hw);
2239 /* When LPLU is enabled, we should disable SmartSpeed */
2240 ret_val = phy->ops.read_reg(hw,
2241 IGP01E1000_PHY_PORT_CONFIG,
2243 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2244 ret_val = phy->ops.write_reg(hw,
2245 IGP01E1000_PHY_PORT_CONFIG,
2250 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2251 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2253 if (phy->type != e1000_phy_igp_3)
2254 return E1000_SUCCESS;
2256 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
2257 * during Dx states where the power conservation is most
2258 * important. During driver activity we should enable
2259 * SmartSpeed, so performance is maintained.
2261 if (phy->smart_speed == e1000_smart_speed_on) {
2262 ret_val = phy->ops.read_reg(hw,
2263 IGP01E1000_PHY_PORT_CONFIG,
2268 data |= IGP01E1000_PSCFR_SMART_SPEED;
2269 ret_val = phy->ops.write_reg(hw,
2270 IGP01E1000_PHY_PORT_CONFIG,
2274 } else if (phy->smart_speed == e1000_smart_speed_off) {
2275 ret_val = phy->ops.read_reg(hw,
2276 IGP01E1000_PHY_PORT_CONFIG,
2281 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2282 ret_val = phy->ops.write_reg(hw,
2283 IGP01E1000_PHY_PORT_CONFIG,
2290 return E1000_SUCCESS;
/* NOTE(review): fused line numbers, interior lines elided (the !active
 * branch framing, error checks, final return); code kept byte-identical.
 * Visible logic mirrors the D0 variant but for NOND0A_LPLU: when inactive,
 * clear LPLU and restore SmartSpeed per phy->smart_speed; when the
 * advertised speeds permit, set NOND0A_LPLU, run the ICH8 gig downshift
 * workaround, and disable SmartSpeed. */
2294 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2295 * @hw: pointer to the HW structure
2296 * @active: true to enable LPLU, false to disable
2298 * Sets the LPLU D3 state according to the active flag. When
2299 * activating LPLU this function also disables smart speed
2300 * and vice versa. LPLU will not be activated unless the
2301 * device autonegotiation advertisement meets standards of
2302 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2303 * This is a function pointer entry point only called by
2304 * PHY setup routines.
2306 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2308 struct e1000_phy_info *phy = &hw->phy;
2310 s32 ret_val = E1000_SUCCESS;
2313 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2315 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
2318 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2319 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2321 if (phy->type != e1000_phy_igp_3)
2322 return E1000_SUCCESS;
2324 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
2325 * during Dx states where the power conservation is most
2326 * important. During driver activity we should enable
2327 * SmartSpeed, so performance is maintained.
2329 if (phy->smart_speed == e1000_smart_speed_on) {
2330 ret_val = phy->ops.read_reg(hw,
2331 IGP01E1000_PHY_PORT_CONFIG,
2336 data |= IGP01E1000_PSCFR_SMART_SPEED;
2337 ret_val = phy->ops.write_reg(hw,
2338 IGP01E1000_PHY_PORT_CONFIG,
2342 } else if (phy->smart_speed == e1000_smart_speed_off) {
2343 ret_val = phy->ops.read_reg(hw,
2344 IGP01E1000_PHY_PORT_CONFIG,
2349 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2350 ret_val = phy->ops.write_reg(hw,
2351 IGP01E1000_PHY_PORT_CONFIG,
2356 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2357 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2358 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2359 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2360 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2362 if (phy->type != e1000_phy_igp_3)
2363 return E1000_SUCCESS;
2365 /* Call gig speed drop workaround on LPLU before accessing
2368 if (hw->mac.type == e1000_ich8lan)
2369 e1000_gig_downshift_workaround_ich8lan(hw);
2371 /* When LPLU is enabled, we should disable SmartSpeed */
2372 ret_val = phy->ops.read_reg(hw,
2373 IGP01E1000_PHY_PORT_CONFIG,
2378 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2379 ret_val = phy->ops.write_reg(hw,
2380 IGP01E1000_PHY_PORT_CONFIG,
/* NOTE(review): fused line numbers, interior lines elided (switch case
 * labels, the *bank assignments between the signature checks); code kept
 * byte-identical.  Visible logic: newer MACs read bank validity from
 * EECD's SEC1VAL bits; otherwise read the signature byte of each flash
 * bank (word 0x13, valid when bits 15:14 == 10b) and report whichever
 * bank has a valid signature, erroring if neither does. */
2388 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2389 * @hw: pointer to the HW structure
2390 * @bank: pointer to the variable that returns the active bank
2392 * Reads signature byte from the NVM using the flash access registers.
2393 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2395 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2398 struct e1000_nvm_info *nvm = &hw->nvm;
2399 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2400 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2404 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2406 switch (hw->mac.type) {
2409 eecd = E1000_READ_REG(hw, E1000_EECD);
2410 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2411 E1000_EECD_SEC1VAL_VALID_MASK) {
2412 if (eecd & E1000_EECD_SEC1VAL)
2417 return E1000_SUCCESS;
2419 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2422 /* set bank to 0 in case flash read fails */
/* Check bank 0 signature first. */
2426 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2430 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2431 E1000_ICH_NVM_SIG_VALUE) {
2433 return E1000_SUCCESS;
/* Then check bank 1 at bank1_offset. */
2437 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2442 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2443 E1000_ICH_NVM_SIG_VALUE) {
2445 return E1000_SUCCESS;
2448 DEBUGOUT("ERROR: No valid NVM bank present\n");
2449 return -E1000_ERR_NVM;
2454 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2455 * @hw: pointer to the HW structure
2456 * @offset: The offset (in bytes) of the word(s) to read.
2457 * @words: Size of data to read in words
2458 * @data: Pointer to the word(s) to read at offset.
2460 * Reads a word(s) from the NVM using the flash access registers.
2462 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2465 struct e1000_nvm_info *nvm = &hw->nvm;
2466 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2468 s32 ret_val = E1000_SUCCESS;
2472 DEBUGFUNC("e1000_read_nvm_ich8lan");
2474 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2476 DEBUGOUT("nvm parameter(s) out of bounds\n");
2477 ret_val = -E1000_ERR_NVM;
2481 nvm->ops.acquire(hw);
2483 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2484 if (ret_val != E1000_SUCCESS) {
2485 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2489 act_offset = (bank) ? nvm->flash_bank_size : 0;
2490 act_offset += offset;
2492 ret_val = E1000_SUCCESS;
2493 for (i = 0; i < words; i++) {
2494 if (dev_spec->shadow_ram[offset+i].modified) {
2495 data[i] = dev_spec->shadow_ram[offset+i].value;
2497 ret_val = e1000_read_flash_word_ich8lan(hw,
2506 nvm->ops.release(hw);
2510 DEBUGOUT1("NVM read error: %d\n", ret_val);
2516 * e1000_flash_cycle_init_ich8lan - Initialize flash
2517 * @hw: pointer to the HW structure
2519 * This function does initial flash setup so that a new read/write/erase cycle
2522 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2524 union ich8_hws_flash_status hsfsts;
2525 s32 ret_val = -E1000_ERR_NVM;
2527 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2529 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2531 /* Check if the flash descriptor is valid */
2532 if (!hsfsts.hsf_status.fldesvalid) {
2533 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
2534 return -E1000_ERR_NVM;
2537 /* Clear FCERR and DAEL in hw status by writing 1 */
2538 hsfsts.hsf_status.flcerr = 1;
2539 hsfsts.hsf_status.dael = 1;
2541 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2543 /* Either we should have a hardware SPI cycle in progress
2544 * bit to check against, in order to start a new cycle or
2545 * FDONE bit should be changed in the hardware so that it
2546 * is 1 after hardware reset, which can then be used as an
2547 * indication whether a cycle is in progress or has been
2551 if (!hsfsts.hsf_status.flcinprog) {
2552 /* There is no cycle running at present,
2553 * so we can start a cycle.
2554 * Begin by setting Flash Cycle Done.
2556 hsfsts.hsf_status.flcdone = 1;
2557 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2558 ret_val = E1000_SUCCESS;
2562 /* Otherwise poll for sometime so the current
2563 * cycle has a chance to end before giving up.
2565 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2566 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2568 if (!hsfsts.hsf_status.flcinprog) {
2569 ret_val = E1000_SUCCESS;
2574 if (ret_val == E1000_SUCCESS) {
2575 /* Successful in waiting for previous cycle to timeout,
2576 * now set the Flash Cycle Done.
2578 hsfsts.hsf_status.flcdone = 1;
2579 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
2582 DEBUGOUT("Flash controller busy, cannot get access\n");
2590 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2591 * @hw: pointer to the HW structure
2592 * @timeout: maximum time to wait for completion
2594 * This function starts a flash cycle and waits for its completion.
2596 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2598 union ich8_hws_flash_ctrl hsflctl;
2599 union ich8_hws_flash_status hsfsts;
2602 DEBUGFUNC("e1000_flash_cycle_ich8lan");
2604 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2605 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2606 hsflctl.hsf_ctrl.flcgo = 1;
2607 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2609 /* wait till FDONE bit is set to 1 */
2611 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2612 if (hsfsts.hsf_status.flcdone)
2615 } while (i++ < timeout);
2617 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
2618 return E1000_SUCCESS;
2620 return -E1000_ERR_NVM;
2624 * e1000_read_flash_word_ich8lan - Read word from flash
2625 * @hw: pointer to the HW structure
2626 * @offset: offset to data location
2627 * @data: pointer to the location for storing the data
2629 * Reads the flash word at offset into data. Offset is converted
2630 * to bytes before read.
2632 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2635 DEBUGFUNC("e1000_read_flash_word_ich8lan");
2638 return -E1000_ERR_NVM;
2640 /* Must convert offset into bytes. */
2643 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2647 * e1000_read_flash_byte_ich8lan - Read byte from flash
2648 * @hw: pointer to the HW structure
2649 * @offset: The offset of the byte to read.
2650 * @data: Pointer to a byte to store the value read.
2652 * Reads a single byte from the NVM using the flash access registers.
2654 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2660 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2666 return E1000_SUCCESS;
2670 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2671 * @hw: pointer to the HW structure
2672 * @offset: The offset (in bytes) of the byte or word to read.
2673 * @size: Size of data to read, 1=byte 2=word
2674 * @data: Pointer to the word to store the value read.
2676 * Reads a byte or word from the NVM using the flash access registers.
2678 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2681 union ich8_hws_flash_status hsfsts;
2682 union ich8_hws_flash_ctrl hsflctl;
2683 u32 flash_linear_addr;
2685 s32 ret_val = -E1000_ERR_NVM;
2688 DEBUGFUNC("e1000_read_flash_data_ich8lan");
2690 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2691 return -E1000_ERR_NVM;
2693 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2694 hw->nvm.flash_base_addr;
2699 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2700 if (ret_val != E1000_SUCCESS)
2703 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2704 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2705 hsflctl.hsf_ctrl.fldbcount = size - 1;
2706 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2707 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2709 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2711 ret_val = e1000_flash_cycle_ich8lan(hw,
2712 ICH_FLASH_READ_COMMAND_TIMEOUT);
2714 /* Check if FCERR is set to 1, if set to 1, clear it
2715 * and try the whole sequence a few more times, else
2716 * read in (shift in) the Flash Data0, the order is
2717 * least significant byte first msb to lsb
2719 if (ret_val == E1000_SUCCESS) {
2720 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2722 *data = (u8)(flash_data & 0x000000FF);
2724 *data = (u16)(flash_data & 0x0000FFFF);
2727 /* If we've gotten here, then things are probably
2728 * completely hosed, but if the error condition is
2729 * detected, it won't hurt to give it another try...
2730 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2732 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2734 if (hsfsts.hsf_status.flcerr) {
2735 /* Repeat for some time before giving up. */
2737 } else if (!hsfsts.hsf_status.flcdone) {
2738 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
2742 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2748 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2749 * @hw: pointer to the HW structure
2750 * @offset: The offset (in bytes) of the word(s) to write.
2751 * @words: Size of data to write in words
2752 * @data: Pointer to the word(s) to write at offset.
2754 * Writes a byte or word to the NVM using the flash access registers.
2756 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2759 struct e1000_nvm_info *nvm = &hw->nvm;
2760 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2763 DEBUGFUNC("e1000_write_nvm_ich8lan");
2765 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2767 DEBUGOUT("nvm parameter(s) out of bounds\n");
2768 return -E1000_ERR_NVM;
2771 nvm->ops.acquire(hw);
2773 for (i = 0; i < words; i++) {
2774 dev_spec->shadow_ram[offset+i].modified = true;
2775 dev_spec->shadow_ram[offset+i].value = data[i];
2778 nvm->ops.release(hw);
2780 return E1000_SUCCESS;
2784 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2785 * @hw: pointer to the HW structure
2787 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2788 * which writes the checksum to the shadow ram. The changes in the shadow
2789 * ram are then committed to the EEPROM by processing each bank at a time
2790 * checking for the modified bit and writing only the pending changes.
2791 * After a successful commit, the shadow ram is cleared and is ready for
2794 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2796 struct e1000_nvm_info *nvm = &hw->nvm;
2797 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2798 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2802 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2804 ret_val = e1000_update_nvm_checksum_generic(hw);
2808 if (nvm->type != e1000_nvm_flash_sw)
2811 nvm->ops.acquire(hw);
2813 /* We're writing to the opposite bank so if we're on bank 1,
2814 * write to bank 0 etc. We also need to erase the segment that
2815 * is going to be written
2817 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2818 if (ret_val != E1000_SUCCESS) {
2819 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2824 new_bank_offset = nvm->flash_bank_size;
2825 old_bank_offset = 0;
2826 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2830 old_bank_offset = nvm->flash_bank_size;
2831 new_bank_offset = 0;
2832 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2837 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2838 /* Determine whether to write the value stored
2839 * in the other NVM bank or a modified value stored
2842 if (dev_spec->shadow_ram[i].modified) {
2843 data = dev_spec->shadow_ram[i].value;
2845 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2852 /* If the word is 0x13, then make sure the signature bits
2853 * (15:14) are 11b until the commit has completed.
2854 * This will allow us to write 10b which indicates the
2855 * signature is valid. We want to do this after the write
2856 * has completed so that we don't mark the segment valid
2857 * while the write is still in progress
2859 if (i == E1000_ICH_NVM_SIG_WORD)
2860 data |= E1000_ICH_NVM_SIG_MASK;
2862 /* Convert offset to bytes. */
2863 act_offset = (i + new_bank_offset) << 1;
2866 /* Write the bytes to the new bank. */
2867 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2874 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2881 /* Don't bother writing the segment valid bits if sector
2882 * programming failed.
2885 DEBUGOUT("Flash commit failed.\n");
2889 /* Finally validate the new segment by setting bit 15:14
2890 * to 10b in word 0x13 , this can be done without an
2891 * erase as well since these bits are 11 to start with
2892 * and we need to change bit 14 to 0b
2894 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2895 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2900 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2906 /* And invalidate the previously valid segment by setting
2907 * its signature word (0x13) high_byte to 0b. This can be
2908 * done without an erase because flash erase sets all bits
2909 * to 1's. We can write 1's to 0's without an erase
2911 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2912 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2916 /* Great! Everything worked, we can now clear the cached entries. */
2917 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2918 dev_spec->shadow_ram[i].modified = false;
2919 dev_spec->shadow_ram[i].value = 0xFFFF;
2923 nvm->ops.release(hw);
2925 /* Reload the EEPROM, or else modifications will not appear
2926 * until after the next adapter reset.
2929 nvm->ops.reload(hw);
2935 DEBUGOUT1("NVM update error: %d\n", ret_val);
2941 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2942 * @hw: pointer to the HW structure
2944 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2945 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
2946 * calculated, in which case we need to calculate the checksum and set bit 6.
2948 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2953 u16 valid_csum_mask;
2955 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2957 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
2958 * the checksum needs to be fixed. This bit is an indication that
2959 * the NVM was prepared by OEM software and did not calculate
2960 * the checksum...a likely scenario.
2962 switch (hw->mac.type) {
2964 word = NVM_FUTURE_INIT_WORD1;
2965 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
2969 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
2973 if (!(data & valid_csum_mask)) {
2974 data |= valid_csum_mask;
2975 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
2978 ret_val = hw->nvm.ops.update(hw);
2983 return e1000_validate_nvm_checksum_generic(hw);
2987 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2988 * @hw: pointer to the HW structure
2989 * @offset: The offset (in bytes) of the byte/word to read.
2990 * @size: Size of data to read, 1=byte 2=word
2991 * @data: The byte(s) to write to the NVM.
2993 * Writes one/two bytes to the NVM using the flash access registers.
2995 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2998 union ich8_hws_flash_status hsfsts;
2999 union ich8_hws_flash_ctrl hsflctl;
3000 u32 flash_linear_addr;
3005 DEBUGFUNC("e1000_write_ich8_data");
3007 if (size < 1 || size > 2 || data > size * 0xff ||
3008 offset > ICH_FLASH_LINEAR_ADDR_MASK)
3009 return -E1000_ERR_NVM;
3011 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3012 hw->nvm.flash_base_addr;
3017 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3018 if (ret_val != E1000_SUCCESS)
3021 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3022 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3023 hsflctl.hsf_ctrl.fldbcount = size - 1;
3024 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3025 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3027 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3030 flash_data = (u32)data & 0x00FF;
3032 flash_data = (u32)data;
3034 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3036 /* check if FCERR is set to 1 , if set to 1, clear it
3037 * and try the whole sequence a few more times else done
3039 ret_val = e1000_flash_cycle_ich8lan(hw,
3040 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3041 if (ret_val == E1000_SUCCESS)
3044 /* If we're here, then things are most likely
3045 * completely hosed, but if the error condition
3046 * is detected, it won't hurt to give it another
3047 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3049 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3050 if (hsfsts.hsf_status.flcerr)
3051 /* Repeat for some time before giving up. */
3053 if (!hsfsts.hsf_status.flcdone) {
3054 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3057 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3063 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3064 * @hw: pointer to the HW structure
3065 * @offset: The index of the byte to read.
3066 * @data: The byte to write to the NVM.
3068 * Writes a single byte to the NVM using the flash access registers.
3070 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3073 u16 word = (u16)data;
3075 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3077 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3081 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3082 * @hw: pointer to the HW structure
3083 * @offset: The offset of the byte to write.
3084 * @byte: The byte to write to the NVM.
3086 * Writes a single byte to the NVM using the flash access registers.
3087 * Goes through a retry algorithm before giving up.
3089 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3090 u32 offset, u8 byte)
3093 u16 program_retries;
3095 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3097 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3101 for (program_retries = 0; program_retries < 100; program_retries++) {
3102 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3104 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3105 if (ret_val == E1000_SUCCESS)
3108 if (program_retries == 100)
3109 return -E1000_ERR_NVM;
3111 return E1000_SUCCESS;
3115 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3116 * @hw: pointer to the HW structure
3117 * @bank: 0 for first bank, 1 for second bank, etc.
3119 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3120 * bank N is 4096 * N + flash_reg_addr.
3122 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3124 struct e1000_nvm_info *nvm = &hw->nvm;
3125 union ich8_hws_flash_status hsfsts;
3126 union ich8_hws_flash_ctrl hsflctl;
3127 u32 flash_linear_addr;
3128 /* bank size is in 16bit words - adjust to bytes */
3129 u32 flash_bank_size = nvm->flash_bank_size * 2;
3132 s32 j, iteration, sector_size;
3134 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3136 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3138 /* Determine HW Sector size: Read BERASE bits of hw flash status
3140 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3141 * consecutive sectors. The start index for the nth Hw sector
3142 * can be calculated as = bank * 4096 + n * 256
3143 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3144 * The start index for the nth Hw sector can be calculated
3146 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3147 * (ich9 only, otherwise error condition)
3148 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3150 switch (hsfsts.hsf_status.berasesz) {
3152 /* Hw sector size 256 */
3153 sector_size = ICH_FLASH_SEG_SIZE_256;
3154 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3157 sector_size = ICH_FLASH_SEG_SIZE_4K;
3161 sector_size = ICH_FLASH_SEG_SIZE_8K;
3165 sector_size = ICH_FLASH_SEG_SIZE_64K;
3169 return -E1000_ERR_NVM;
3172 /* Start with the base address, then add the sector offset. */
3173 flash_linear_addr = hw->nvm.flash_base_addr;
3174 flash_linear_addr += (bank) ? flash_bank_size : 0;
3176 for (j = 0; j < iteration ; j++) {
3179 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3183 /* Write a value 11 (block Erase) in Flash
3184 * Cycle field in hw flash control
3186 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3188 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3189 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3192 /* Write the last 24 bits of an index within the
3193 * block into Flash Linear address field in Flash
3196 flash_linear_addr += (j * sector_size);
3197 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3200 ret_val = e1000_flash_cycle_ich8lan(hw,
3201 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3202 if (ret_val == E1000_SUCCESS)
3205 /* Check if FCERR is set to 1. If 1,
3206 * clear it and try the whole sequence
3207 * a few more times else Done
3209 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3211 if (hsfsts.hsf_status.flcerr)
3212 /* repeat for some time before giving up */
3214 else if (!hsfsts.hsf_status.flcdone)
3216 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3219 return E1000_SUCCESS;
3223 * e1000_valid_led_default_ich8lan - Set the default LED settings
3224 * @hw: pointer to the HW structure
3225 * @data: Pointer to the LED settings
3227 * Reads the LED default settings from the NVM to data. If the NVM LED
3228 * settings is all 0's or F's, set the LED default to a valid LED default
3231 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3235 DEBUGFUNC("e1000_valid_led_default_ich8lan");
3237 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3239 DEBUGOUT("NVM Read Error\n");
3243 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3244 *data = ID_LED_DEFAULT_ICH8LAN;
3246 return E1000_SUCCESS;
3250 * e1000_id_led_init_pchlan - store LED configurations
3251 * @hw: pointer to the HW structure
3253 * PCH does not control LEDs via the LEDCTL register, rather it uses
3254 * the PHY LED configuration register.
3256 * PCH also does not have an "always on" or "always off" mode which
3257 * complicates the ID feature. Instead of using the "on" mode to indicate
3258 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3259 * use "link_up" mode. The LEDs will still ID on request if there is no
3260 * link based on logic in e1000_led_[on|off]_pchlan().
3262 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3264 struct e1000_mac_info *mac = &hw->mac;
3266 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3267 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3268 u16 data, i, temp, shift;
3270 DEBUGFUNC("e1000_id_led_init_pchlan");
3272 /* Get default ID LED modes */
3273 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3277 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3278 mac->ledctl_mode1 = mac->ledctl_default;
3279 mac->ledctl_mode2 = mac->ledctl_default;
3281 for (i = 0; i < 4; i++) {
3282 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3285 case ID_LED_ON1_DEF2:
3286 case ID_LED_ON1_ON2:
3287 case ID_LED_ON1_OFF2:
3288 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3289 mac->ledctl_mode1 |= (ledctl_on << shift);
3291 case ID_LED_OFF1_DEF2:
3292 case ID_LED_OFF1_ON2:
3293 case ID_LED_OFF1_OFF2:
3294 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3295 mac->ledctl_mode1 |= (ledctl_off << shift);
3302 case ID_LED_DEF1_ON2:
3303 case ID_LED_ON1_ON2:
3304 case ID_LED_OFF1_ON2:
3305 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3306 mac->ledctl_mode2 |= (ledctl_on << shift);
3308 case ID_LED_DEF1_OFF2:
3309 case ID_LED_ON1_OFF2:
3310 case ID_LED_OFF1_OFF2:
3311 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3312 mac->ledctl_mode2 |= (ledctl_off << shift);
3320 return E1000_SUCCESS;
3324 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3325 * @hw: pointer to the HW structure
3327 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3328 * register, so the the bus width is hard coded.
3330 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3332 struct e1000_bus_info *bus = &hw->bus;
3335 DEBUGFUNC("e1000_get_bus_info_ich8lan");
3337 ret_val = e1000_get_bus_info_pcie_generic(hw);
3339 /* ICH devices are "PCI Express"-ish. They have
3340 * a configuration space, but do not contain
3341 * PCI Express Capability registers, so bus width
3342 * must be hardcoded.
3344 if (bus->width == e1000_bus_width_unknown)
3345 bus->width = e1000_bus_width_pcie_x1;
3351 * e1000_reset_hw_ich8lan - Reset the hardware
3352 * @hw: pointer to the HW structure
3354 * Does a full reset of the hardware which includes a reset of the PHY and
3357 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3359 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3364 DEBUGFUNC("e1000_reset_hw_ich8lan");
3366 /* Prevent the PCI-E bus from sticking if there is no TLP connection
3367 * on the last TLP read/write transaction when MAC is reset.
3369 ret_val = e1000_disable_pcie_master_generic(hw);
3371 DEBUGOUT("PCI-E Master disable polling has failed.\n");
3373 DEBUGOUT("Masking off all interrupts\n");
3374 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3376 /* Disable the Transmit and Receive units. Then delay to allow
3377 * any pending transactions to complete before we hit the MAC
3378 * with the global reset.
3380 E1000_WRITE_REG(hw, E1000_RCTL, 0);
3381 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3382 E1000_WRITE_FLUSH(hw);
3386 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3387 if (hw->mac.type == e1000_ich8lan) {
3388 /* Set Tx and Rx buffer allocation to 8k apiece. */
3389 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3390 /* Set Packet Buffer Size to 16k. */
3391 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3394 if (hw->mac.type == e1000_pchlan) {
3395 /* Save the NVM K1 bit setting*/
3396 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3400 if (kum_cfg & E1000_NVM_K1_ENABLE)
3401 dev_spec->nvm_k1_enabled = true;
3403 dev_spec->nvm_k1_enabled = false;
3406 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3408 if (!hw->phy.ops.check_reset_block(hw)) {
3409 /* Full-chip reset requires MAC and PHY reset at the same
3410 * time to make sure the interface between MAC and the
3411 * external PHY is reset.
3413 ctrl |= E1000_CTRL_PHY_RST;
3415 /* Gate automatic PHY configuration by hardware on
3418 if ((hw->mac.type == e1000_pch2lan) &&
3419 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3420 e1000_gate_hw_phy_config_ich8lan(hw, true);
3422 ret_val = e1000_acquire_swflag_ich8lan(hw);
3423 DEBUGOUT("Issuing a global reset to ich8lan\n");
3424 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3425 /* cannot issue a flush here because it hangs the hardware */
3428 /* Set Phy Config Counter to 50msec */
3429 if (hw->mac.type == e1000_pch2lan) {
3430 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
3431 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3432 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3433 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
3437 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
3439 if (ctrl & E1000_CTRL_PHY_RST) {
3440 ret_val = hw->phy.ops.get_cfg_done(hw);
3444 ret_val = e1000_post_phy_reset_ich8lan(hw);
3449 /* For PCH, this write will make sure that any noise
3450 * will be detected as a CRC error and be dropped rather than show up
3451 * as a bad packet to the DMA engine.
3453 if (hw->mac.type == e1000_pchlan)
3454 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
3456 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3457 E1000_READ_REG(hw, E1000_ICR);
3459 reg = E1000_READ_REG(hw, E1000_KABGTXD);
3460 reg |= E1000_KABGTXD_BGSQLBIAS;
3461 E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
3463 return E1000_SUCCESS;
3467 * e1000_init_hw_ich8lan - Initialize the hardware
3468 * @hw: pointer to the HW structure
3470 * Prepares the hardware for transmit and receive by doing the following:
3471 * - initialize hardware bits
3472 * - initialize LED identification
3473 * - setup receive address registers
3474 * - setup flow control
3475 * - setup transmit descriptors
3476 * - clear statistics
3478 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3480 struct e1000_mac_info *mac = &hw->mac;
3481 u32 ctrl_ext, txdctl, snoop;
3485 DEBUGFUNC("e1000_init_hw_ich8lan");
3487 e1000_initialize_hw_bits_ich8lan(hw);
3489 /* Initialize identification LED */
3490 ret_val = mac->ops.id_led_init(hw);
3492 DEBUGOUT("Error initializing identification LED\n");
3493 /* This is not fatal and we should not stop init due to this */
3495 /* Setup the receive address. */
3496 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3498 /* Zero out the Multicast HASH table */
3499 DEBUGOUT("Zeroing the MTA\n");
3500 for (i = 0; i < mac->mta_reg_count; i++)
3501 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3503 /* The 82578 Rx buffer will stall if wakeup is enabled in host and
3504 * the ME. Disable wakeup by clearing the host wakeup bit.
3505 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3507 if (hw->phy.type == e1000_phy_82578) {
3508 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
3509 i &= ~BM_WUC_HOST_WU_BIT;
3510 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
3511 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3516 /* Setup link and flow control */
3517 ret_val = mac->ops.setup_link(hw);
3519 /* Set the transmit descriptor write-back policy for both queues */
3520 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3521 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3522 E1000_TXDCTL_FULL_TX_DESC_WB;
3523 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3524 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3525 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
3526 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3527 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3528 E1000_TXDCTL_FULL_TX_DESC_WB;
3529 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3530 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3531 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3533 /* ICH8 has opposite polarity of no_snoop bits.
3534 * By default, we should use snoop behavior.
3536 if (mac->type == e1000_ich8lan)
3537 snoop = PCIE_ICH8_SNOOP_ALL;
3539 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3540 e1000_set_pcie_no_snoop_generic(hw, snoop);
3542 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3543 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3544 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3546 /* Clear all of the statistics registers (clear on read). It is
3547 * important that we do this after we have tried to establish link
3548 * because the symbol error count will increment wildly if there
3551 e1000_clear_hw_cntrs_ich8lan(hw);
3557 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3558 * @hw: pointer to the HW structure
3560 * Sets/Clears required hardware bits necessary for correctly setting up the
3561 * hardware for transmit and receive.
3563 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3567 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3569 /* Extended Device Control */
3570 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3572 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3573 if (hw->mac.type >= e1000_pchlan)
3574 reg |= E1000_CTRL_EXT_PHYPDEN;
3575 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3577 /* Transmit Descriptor Control 0 */
3578 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3580 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3582 /* Transmit Descriptor Control 1 */
3583 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3585 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3587 /* Transmit Arbitration Control 0 */
3588 reg = E1000_READ_REG(hw, E1000_TARC(0));
3589 if (hw->mac.type == e1000_ich8lan)
3590 reg |= (1 << 28) | (1 << 29);
3591 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3592 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3594 /* Transmit Arbitration Control 1 */
3595 reg = E1000_READ_REG(hw, E1000_TARC(1));
3596 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3600 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3601 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3604 if (hw->mac.type == e1000_ich8lan) {
3605 reg = E1000_READ_REG(hw, E1000_STATUS);
3607 E1000_WRITE_REG(hw, E1000_STATUS, reg);
3610 /* work-around descriptor data corruption issue during nfs v2 udp
3611 * traffic, just disable the nfs filtering capability
3613 reg = E1000_READ_REG(hw, E1000_RFCTL);
3614 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3616 /* Disable IPv6 extension header parsing because some malformed
3617 * IPv6 headers can hang the Rx.
3619 if (hw->mac.type == e1000_ich8lan)
3620 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
3621 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3627 * e1000_setup_link_ich8lan - Setup flow control and link settings
3628 * @hw: pointer to the HW structure
3630 * Determines which flow control settings to use, then configures flow
3631 * control. Calls the appropriate media-specific link configuration
3632 * function. Assuming the adapter has a valid link partner, a valid link
3633 * should be established. Assumes the hardware has previously been reset
3634 * and the transmitter and receiver are not enabled.
3636 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3640 DEBUGFUNC("e1000_setup_link_ich8lan");
3642 if (hw->phy.ops.check_reset_block(hw))
3643 return E1000_SUCCESS;
3645 /* ICH parts do not have a word in the NVM to determine
3646 * the default flow control setting, so we explicitly
3649 if (hw->fc.requested_mode == e1000_fc_default)
3650 hw->fc.requested_mode = e1000_fc_full;
3652 /* Save off the requested flow control mode for use later. Depending
3653 * on the link partner's capabilities, we may or may not use this mode.
3655 hw->fc.current_mode = hw->fc.requested_mode;
3657 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3658 hw->fc.current_mode);
3660 /* Continue to configure the copper link. */
3661 ret_val = hw->mac.ops.setup_physical_interface(hw);
3665 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
3666 if ((hw->phy.type == e1000_phy_82578) ||
3667 (hw->phy.type == e1000_phy_82579) ||
3668 (hw->phy.type == e1000_phy_i217) ||
3669 (hw->phy.type == e1000_phy_82577)) {
3670 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3672 ret_val = hw->phy.ops.write_reg(hw,
3673 PHY_REG(BM_PORT_CTRL_PAGE, 27),
3679 return e1000_set_fc_watermarks_generic(hw);
3683 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3684 * @hw: pointer to the HW structure
3686 * Configures the kumeran interface to the PHY to wait the appropriate time
3687 * when polling the PHY, then call the generic setup_copper_link to finish
3688 * configuring the copper link.
3690 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3696 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
3698 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3699 ctrl |= E1000_CTRL_SLU;
3700 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3701 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3703 /* Set the mac to wait the maximum time between each iteration
3704 * and increase the max iterations when polling the phy;
3705 * this fixes erroneous timeouts at 10Mbps.
3707 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
3711 ret_val = e1000_read_kmrn_reg_generic(hw,
3712 E1000_KMRNCTRLSTA_INBAND_PARAM,
3717 ret_val = e1000_write_kmrn_reg_generic(hw,
3718 E1000_KMRNCTRLSTA_INBAND_PARAM,
3723 switch (hw->phy.type) {
3724 case e1000_phy_igp_3:
3725 ret_val = e1000_copper_link_setup_igp(hw);
3730 case e1000_phy_82578:
3731 ret_val = e1000_copper_link_setup_m88(hw);
3735 case e1000_phy_82577:
3736 case e1000_phy_82579:
3737 case e1000_phy_i217:
3738 ret_val = e1000_copper_link_setup_82577(hw);
3743 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3748 reg_data &= ~IFE_PMC_AUTO_MDIX;
3750 switch (hw->phy.mdix) {
3752 reg_data &= ~IFE_PMC_FORCE_MDIX;
3755 reg_data |= IFE_PMC_FORCE_MDIX;
3759 reg_data |= IFE_PMC_AUTO_MDIX;
3762 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3771 return e1000_setup_copper_link_generic(hw);
3775 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3776 * @hw: pointer to the HW structure
3777 * @speed: pointer to store current link speed
3778 * @duplex: pointer to store the current link duplex
3780 * Calls the generic get_speed_and_duplex to retrieve the current link
3781 * information and then calls the Kumeran lock loss workaround for links at
3784 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3789 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3791 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
3795 if ((hw->mac.type == e1000_ich8lan) &&
3796 (hw->phy.type == e1000_phy_igp_3) &&
3797 (*speed == SPEED_1000)) {
3798 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3805 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3806 * @hw: pointer to the HW structure
3808 * Work-around for 82566 Kumeran PCS lock loss:
3809 * On link status change (i.e. PCI reset, speed change) and link is up and
3811 * 0) if workaround is optionally disabled do nothing
3812 * 1) wait 1ms for Kumeran link to come up
3813 * 2) check Kumeran Diagnostic register PCS lock loss bit
3814 * 3) if not set the link is locked (all is good), otherwise...
3816 * 5) repeat up to 10 times
3817 * Note: this is only called for IGP3 copper when speed is 1gb.
3819 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3821 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3827 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
3829 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3830 return E1000_SUCCESS;
3832 /* Make sure link is up before proceeding. If not just return.
3833 * Attempting this while link is negotiating fouled up link
3836 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3838 return E1000_SUCCESS;
3840 for (i = 0; i < 10; i++) {
3841 /* read once to clear */
3842 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3845 /* and again to get new status */
3846 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3850 /* check for PCS lock */
3851 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3852 return E1000_SUCCESS;
3854 /* Issue PHY reset */
3855 hw->phy.ops.reset(hw);
3858 /* Disable GigE link negotiation */
3859 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3860 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3861 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3862 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3864 /* Call gig speed drop workaround on Gig disable before accessing
3867 e1000_gig_downshift_workaround_ich8lan(hw);
3869 /* unable to acquire PCS lock */
3870 return -E1000_ERR_PHY;
3874 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3875 * @hw: pointer to the HW structure
3876 * @state: boolean value used to set the current Kumeran workaround state
3878 * If ICH8, set the current Kumeran workaround state (enabled - true
3879 * /disabled - false).
3881 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3884 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3886 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
3888 if (hw->mac.type != e1000_ich8lan) {
3889 DEBUGOUT("Workaround applies to ICH8 only.\n");
3893 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3899 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3900 * @hw: pointer to the HW structure
3902 * Workaround for 82566 power-down on D3 entry:
3903 * 1) disable gigabit link
3904 * 2) write VR power-down enable
3906 * Continue if successful, else issue LCD reset and repeat
3908 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3914 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
3916 if (hw->phy.type != e1000_phy_igp_3)
3919 /* Try the workaround twice (if needed) */
3922 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3923 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3924 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3925 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3927 /* Call gig speed drop workaround on Gig disable before
3928 * accessing any PHY registers
3930 if (hw->mac.type == e1000_ich8lan)
3931 e1000_gig_downshift_workaround_ich8lan(hw);
3933 /* Write VR power-down enable */
3934 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3935 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3936 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3937 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3939 /* Read it back and test */
3940 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3941 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3942 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3945 /* Issue PHY reset and repeat at most one more time */
3946 reg = E1000_READ_REG(hw, E1000_CTRL);
3947 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3953 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3954 * @hw: pointer to the HW structure
3956 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
3957 * LPLU, Gig disable, MDIC PHY reset):
3958 * 1) Set Kumeran Near-end loopback
3959 * 2) Clear Kumeran Near-end loopback
3960 * Should only be called for ICH8[m] devices with any 1G Phy.
3962 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3967 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
3969 if ((hw->mac.type != e1000_ich8lan) ||
3970 (hw->phy.type == e1000_phy_ife))
3973 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3977 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3978 ret_val = e1000_write_kmrn_reg_generic(hw,
3979 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3983 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3984 ret_val = e1000_write_kmrn_reg_generic(hw,
3985 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3990 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
3991 * @hw: pointer to the HW structure
3993 * During S0 to Sx transition, it is possible the link remains at gig
3994 * instead of negotiating to a lower speed. Before going to Sx, set
3995 * 'Gig Disable' to force link speed negotiation to a lower speed based on
3996 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
3997 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
3998 * needs to be written.
3999 * Parts that support (and are linked to a partner which support) EEE in
4000 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4001 * than 10Mbps w/o EEE.
4003 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4005 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4009 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4011 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4012 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4013 if (hw->phy.type == e1000_phy_i217) {
4016 ret_val = hw->phy.ops.acquire(hw);
4020 if (!dev_spec->eee_disable) {
4024 e1000_read_emi_reg_locked(hw,
4025 I217_EEE_ADVERTISEMENT,
4030 /* Disable LPLU if both link partners support 100BaseT
4031 * EEE and 100Full is advertised on both ends of the
4034 if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4035 (dev_spec->eee_lp_ability &
4036 I82579_EEE_100_SUPPORTED) &&
4037 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
4038 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4039 E1000_PHY_CTRL_NOND0A_LPLU);
4042 /* For i217 Intel Rapid Start Technology support,
4043 * when the system is going into Sx and no manageability engine
4044 * is present, the driver must configure proxy to reset only on
4045 * power good. LPI (Low Power Idle) state must also reset only
4046 * on power good, as well as the MTA (Multicast table array).
4047 * The SMBus release must also be disabled on LCD reset.
4049 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4050 E1000_ICH_FWSM_FW_VALID)) {
4052 /* Enable proxy to reset only on power good. */
4053 hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4055 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4056 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4059 /* Set bit enable LPI (EEE) to reset only on
4062 hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4063 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4064 hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4066 /* Disable the SMB release on LCD reset. */
4067 hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4068 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4069 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4072 /* Enable MTA to reset for Intel Rapid Start Technology
4075 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4076 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4077 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4080 hw->phy.ops.release(hw);
4083 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4085 if (hw->mac.type == e1000_ich8lan)
4086 e1000_gig_downshift_workaround_ich8lan(hw);
4088 if (hw->mac.type >= e1000_pchlan) {
4089 e1000_oem_bits_config_ich8lan(hw, false);
4091 /* Reset PHY to activate OEM bits on 82577/8 */
4092 if (hw->mac.type == e1000_pchlan)
4093 e1000_phy_hw_reset_generic(hw);
4095 ret_val = hw->phy.ops.acquire(hw);
4098 e1000_write_smbus_addr(hw);
4099 hw->phy.ops.release(hw);
4106 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4107 * @hw: pointer to the HW structure
4109 * During Sx to S0 transitions on non-managed devices or managed devices
4110 * on which PHY resets are not blocked, if the PHY registers cannot be
4111 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
4113 * On i217, setup Intel Rapid Start Technology.
4115 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4119 DEBUGFUNC("e1000_resume_workarounds_pchlan");
4121 if (hw->mac.type < e1000_pch2lan)
4124 ret_val = e1000_init_phy_workarounds_pchlan(hw);
4126 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4130 /* For i217 Intel Rapid Start Technology support when the system
4131 * is transitioning from Sx and no manageability engine is present
4132 * configure SMBus to restore on reset, disable proxy, and enable
4133 * the reset on MTA (Multicast table array).
4135 if (hw->phy.type == e1000_phy_i217) {
4138 ret_val = hw->phy.ops.acquire(hw);
4140 DEBUGOUT("Failed to setup iRST\n");
4144 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4145 E1000_ICH_FWSM_FW_VALID)) {
4146 /* Restore clear on SMB if no manageability engine
4149 ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
4153 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4154 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4157 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
4159 /* Enable reset on MTA */
4160 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
4164 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4165 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4168 DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
4169 hw->phy.ops.release(hw);
4174 * e1000_cleanup_led_ich8lan - Restore the default LED operation
4175 * @hw: pointer to the HW structure
4177 * Return the LED back to the default configuration.
4179 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4181 DEBUGFUNC("e1000_cleanup_led_ich8lan");
4183 if (hw->phy.type == e1000_phy_ife)
4184 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4187 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
4188 return E1000_SUCCESS;
4192 * e1000_led_on_ich8lan - Turn LEDs on
4193 * @hw: pointer to the HW structure
4197 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
4199 DEBUGFUNC("e1000_led_on_ich8lan");
4201 if (hw->phy.type == e1000_phy_ife)
4202 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4203 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
4205 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
4206 return E1000_SUCCESS;
4210 * e1000_led_off_ich8lan - Turn LEDs off
4211 * @hw: pointer to the HW structure
4213 * Turn off the LEDs.
4215 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
4217 DEBUGFUNC("e1000_led_off_ich8lan");
4219 if (hw->phy.type == e1000_phy_ife)
4220 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4221 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
4223 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
4224 return E1000_SUCCESS;
4228 * e1000_setup_led_pchlan - Configures SW controllable LED
4229 * @hw: pointer to the HW structure
4231 * This prepares the SW controllable LED for use.
4233 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4235 DEBUGFUNC("e1000_setup_led_pchlan");
4237 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4238 (u16)hw->mac.ledctl_mode1);
4242 * e1000_cleanup_led_pchlan - Restore the default LED operation
4243 * @hw: pointer to the HW structure
4245 * Return the LED back to the default configuration.
4247 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4249 DEBUGFUNC("e1000_cleanup_led_pchlan");
4251 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4252 (u16)hw->mac.ledctl_default);
4256 * e1000_led_on_pchlan - Turn LEDs on
4257 * @hw: pointer to the HW structure
4261 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4263 u16 data = (u16)hw->mac.ledctl_mode2;
4266 DEBUGFUNC("e1000_led_on_pchlan");
4268 /* If no link, then turn LED on by setting the invert bit
4269 * for each LED that's mode is "link_up" in ledctl_mode2.
4271 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4272 for (i = 0; i < 3; i++) {
4273 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4274 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4275 E1000_LEDCTL_MODE_LINK_UP)
4277 if (led & E1000_PHY_LED0_IVRT)
4278 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4280 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4284 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4288 * e1000_led_off_pchlan - Turn LEDs off
4289 * @hw: pointer to the HW structure
4291 * Turn off the LEDs.
4293 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4295 u16 data = (u16)hw->mac.ledctl_mode1;
4298 DEBUGFUNC("e1000_led_off_pchlan");
4300 /* If no link, then turn LED off by clearing the invert bit
4301 * for each LED that's mode is "link_up" in ledctl_mode1.
4303 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
4304 for (i = 0; i < 3; i++) {
4305 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4306 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4307 E1000_LEDCTL_MODE_LINK_UP)
4309 if (led & E1000_PHY_LED0_IVRT)
4310 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4312 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4316 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4320 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4321 * @hw: pointer to the HW structure
4323 * Read appropriate register for the config done bit for completion status
4324 * and configure the PHY through s/w for EEPROM-less parts.
4326 * NOTE: some silicon which is EEPROM-less will fail trying to read the
4327 * config done bit, so only an error is logged and continues. If we were
4328 * to return with error, EEPROM-less silicon would not be able to be reset
4331 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4333 s32 ret_val = E1000_SUCCESS;
4337 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4339 e1000_get_cfg_done_generic(hw);
4341 /* Wait for indication from h/w that it has completed basic config */
4342 if (hw->mac.type >= e1000_ich10lan) {
4343 e1000_lan_init_done_ich8lan(hw);
4345 ret_val = e1000_get_auto_rd_done_generic(hw);
4347 /* When auto config read does not complete, do not
4348 * return with an error. This can happen in situations
4349 * where there is no eeprom and prevents getting link.
4351 DEBUGOUT("Auto Read Done did not complete\n");
4352 ret_val = E1000_SUCCESS;
4356 /* Clear PHY Reset Asserted bit */
4357 status = E1000_READ_REG(hw, E1000_STATUS);
4358 if (status & E1000_STATUS_PHYRA)
4359 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4361 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4363 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
4364 if (hw->mac.type <= e1000_ich9lan) {
4365 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
4366 (hw->phy.type == e1000_phy_igp_3)) {
4367 e1000_phy_init_script_igp3(hw);
4370 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4371 /* Maybe we should do a basic PHY config */
4372 DEBUGOUT("EEPROM not present\n");
4373 ret_val = -E1000_ERR_CONFIG;
4381 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4382 * @hw: pointer to the HW structure
4384 * In the case of a PHY power down to save power, or to turn off link during a
4385 * driver unload, or wake on lan is not enabled, remove the link.
4387 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4389 /* If the management interface is not enabled, then power down */
4390 if (!(hw->mac.ops.check_mng_mode(hw) ||
4391 hw->phy.ops.check_reset_block(hw)))
4392 e1000_power_down_phy_copper(hw);
4398 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4399 * @hw: pointer to the HW structure
4401 * Clears hardware counters specific to the silicon family and calls
4402 * clear_hw_cntrs_generic to clear all general purpose counters.
4404 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4409 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4411 e1000_clear_hw_cntrs_base_generic(hw);
4413 E1000_READ_REG(hw, E1000_ALGNERRC);
4414 E1000_READ_REG(hw, E1000_RXERRC);
4415 E1000_READ_REG(hw, E1000_TNCRS);
4416 E1000_READ_REG(hw, E1000_CEXTERR);
4417 E1000_READ_REG(hw, E1000_TSCTC);
4418 E1000_READ_REG(hw, E1000_TSCTFC);
4420 E1000_READ_REG(hw, E1000_MGTPRC);
4421 E1000_READ_REG(hw, E1000_MGTPDC);
4422 E1000_READ_REG(hw, E1000_MGTPTC);
4424 E1000_READ_REG(hw, E1000_IAC);
4425 E1000_READ_REG(hw, E1000_ICRXOC);
4427 /* Clear PHY statistics registers */
4428 if ((hw->phy.type == e1000_phy_82578) ||
4429 (hw->phy.type == e1000_phy_82579) ||
4430 (hw->phy.type == e1000_phy_i217) ||
4431 (hw->phy.type == e1000_phy_82577)) {
4432 ret_val = hw->phy.ops.acquire(hw);
4435 ret_val = hw->phy.ops.set_page(hw,
4436 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4439 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4440 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4441 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4442 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4443 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4444 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4445 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4446 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4447 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4448 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4449 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4450 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4451 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4452 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4454 hw->phy.ops.release(hw);