1 /*******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
35 * 82562G 10/100 Network Connection
36 * 82562G-2 10/100 Network Connection
37 * 82562GT 10/100 Network Connection
38 * 82562GT-2 10/100 Network Connection
39 * 82562V 10/100 Network Connection
40 * 82562V-2 10/100 Network Connection
41 * 82566DC-2 Gigabit Network Connection
42 * 82566DC Gigabit Network Connection
43 * 82566DM-2 Gigabit Network Connection
44 * 82566DM Gigabit Network Connection
45 * 82566MC Gigabit Network Connection
46 * 82566MM Gigabit Network Connection
47 * 82567LM Gigabit Network Connection
48 * 82567LF Gigabit Network Connection
49 * 82567V Gigabit Network Connection
50 * 82567LM-2 Gigabit Network Connection
51 * 82567LF-2 Gigabit Network Connection
52 * 82567V-2 Gigabit Network Connection
53 * 82567LF-3 Gigabit Network Connection
54 * 82567LM-3 Gigabit Network Connection
55 * 82567LM-4 Gigabit Network Connection
56 * 82577LM Gigabit Network Connection
57 * 82577LC Gigabit Network Connection
58 * 82578DM Gigabit Network Connection
59 * 82578DC Gigabit Network Connection
60 * 82579LM Gigabit Network Connection
61 * 82579V Gigabit Network Connection
64 #include "e1000_api.h"
66 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
67 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw);
68 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
69 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
70 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
71 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
72 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
73 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
74 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
75 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
76 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
77 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
78 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
79 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
80 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
82 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
84 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
85 u16 words, u16 *data);
86 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
87 u16 words, u16 *data);
88 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
89 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
90 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
92 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
93 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
94 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
95 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
96 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
97 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
98 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
99 u16 *speed, u16 *duplex);
100 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
101 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
102 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
103 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
104 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
105 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
106 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw);
107 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw);
108 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
109 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
110 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
111 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
112 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
113 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
114 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
115 u32 offset, u8 *data);
116 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
118 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
119 u32 offset, u16 *data);
120 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
121 u32 offset, u8 byte);
122 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
123 u32 offset, u8 data);
124 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
126 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
127 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
128 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
129 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
130 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
131 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
132 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
133 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
135 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
136 /* Offset 04h HSFSTS */
137 union ich8_hws_flash_status {
139 u16 flcdone:1; /* bit 0 Flash Cycle Done */
140 u16 flcerr:1; /* bit 1 Flash Cycle Error */
141 u16 dael:1; /* bit 2 Direct Access error Log */
142 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
143 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
144 u16 reserved1:2; /* bit 13:6 Reserved */
145 u16 reserved2:6; /* bit 13:6 Reserved */
146 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
147 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
152 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
153 /* Offset 06h FLCTL */
154 union ich8_hws_flash_ctrl {
155 struct ich8_hsflctl {
156 u16 flcgo:1; /* 0 Flash Cycle Go */
157 u16 flcycle:2; /* 2:1 Flash Cycle */
158 u16 reserved:5; /* 7:3 Reserved */
159 u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
160 u16 flockdn:6; /* 15:10 Reserved */
165 /* ICH Flash Region Access Permissions */
166 union ich8_hws_flash_regacc {
168 u32 grra:8; /* 0:7 GbE region Read Access */
169 u32 grwa:8; /* 8:15 GbE region Write Access */
170 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
171 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
177 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
178 * @hw: pointer to the HW structure
180 * Test access to the PHY registers by reading the PHY ID registers. If
181 * the PHY ID is already known (e.g. resume path) compare it with known ID,
182 * otherwise assume the read PHY ID is correct if it is valid.
184 * Assumes the sw/fw/hw semaphore is already acquired.
186 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
193 for (retry_count = 0; retry_count < 2; retry_count++) {
194 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
195 if (ret_val || (phy_reg == 0xFFFF))
197 phy_id = (u32)(phy_reg << 16);
199 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
200 if (ret_val || (phy_reg == 0xFFFF)) {
204 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
209 if (hw->phy.id == phy_id)
213 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
218 * In case the PHY needs to be in mdio slow mode,
219 * set slow mode and try to get the PHY id again.
221 hw->phy.ops.release(hw);
222 ret_val = e1000_set_mdio_slow_mode_hv(hw);
224 ret_val = e1000_get_phy_id(hw);
225 hw->phy.ops.acquire(hw);
231 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
232 * @hw: pointer to the HW structure
234 * Workarounds/flow necessary for PHY initialization during driver load
237 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
239 u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
242 DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
244 ret_val = hw->phy.ops.acquire(hw);
246 DEBUGOUT("Failed to initialize PHY flow\n");
251 * The MAC-PHY interconnect may be in SMBus mode. If the PHY is
252 * inaccessible and resetting the PHY is not blocked, toggle the
253 * LANPHYPC Value bit to force the interconnect to PCIe mode.
255 switch (hw->mac.type) {
258 * Gate automatic PHY configuration by hardware on
261 if ((hw->mac.type == e1000_pch2lan) &&
262 !(fwsm & E1000_ICH_FWSM_FW_VALID))
263 e1000_gate_hw_phy_config_ich8lan(hw, true);
265 if (e1000_phy_is_accessible_pchlan(hw)) {
271 if ((hw->mac.type == e1000_pchlan) &&
272 (fwsm & E1000_ICH_FWSM_FW_VALID))
275 if (hw->phy.ops.check_reset_block(hw)) {
276 DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
280 DEBUGOUT("Toggling LANPHYPC\n");
282 /* Set Phy Config Counter to 50msec */
283 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
284 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
285 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
286 E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
288 /* Toggle LANPHYPC Value bit */
289 mac_reg = E1000_READ_REG(hw, E1000_CTRL);
290 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
291 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
292 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
293 E1000_WRITE_FLUSH(hw);
295 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
296 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
297 E1000_WRITE_FLUSH(hw);
304 hw->phy.ops.release(hw);
307 * Reset the PHY before any access to it. Doing so, ensures
308 * that the PHY is in a known good state before we read/write
309 * PHY registers. The generic reset is sufficient here,
310 * because we haven't determined the PHY type yet.
312 ret_val = e1000_phy_hw_reset_generic(hw);
314 /* Ungate automatic PHY configuration on non-managed 82579 */
315 if ((hw->mac.type == e1000_pch2lan) &&
316 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
318 e1000_gate_hw_phy_config_ich8lan(hw, false);
325 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
326 * @hw: pointer to the HW structure
328 * Initialize family-specific PHY parameters and function pointers.
330 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
332 struct e1000_phy_info *phy = &hw->phy;
333 s32 ret_val = E1000_SUCCESS;
335 DEBUGFUNC("e1000_init_phy_params_pchlan");
338 phy->reset_delay_us = 100;
340 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
341 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
342 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
343 phy->ops.set_page = e1000_set_page_igp;
344 phy->ops.read_reg = e1000_read_phy_reg_hv;
345 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
346 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
347 phy->ops.release = e1000_release_swflag_ich8lan;
348 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
349 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
350 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
351 phy->ops.write_reg = e1000_write_phy_reg_hv;
352 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
353 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
354 phy->ops.power_up = e1000_power_up_phy_copper;
355 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
356 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
358 phy->id = e1000_phy_unknown;
360 ret_val = e1000_init_phy_workarounds_pchlan(hw);
364 if (phy->id == e1000_phy_unknown)
365 switch (hw->mac.type) {
367 ret_val = e1000_get_phy_id(hw);
370 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
375 * In case the PHY needs to be in mdio slow mode,
376 * set slow mode and try to get the PHY id again.
378 ret_val = e1000_set_mdio_slow_mode_hv(hw);
381 ret_val = e1000_get_phy_id(hw);
386 phy->type = e1000_get_phy_type_from_id(phy->id);
389 case e1000_phy_82577:
390 case e1000_phy_82579:
392 phy->ops.check_polarity = e1000_check_polarity_82577;
393 phy->ops.force_speed_duplex =
394 e1000_phy_force_speed_duplex_82577;
395 phy->ops.get_cable_length = e1000_get_cable_length_82577;
396 phy->ops.get_info = e1000_get_phy_info_82577;
397 phy->ops.commit = e1000_phy_sw_reset_generic;
399 case e1000_phy_82578:
400 phy->ops.check_polarity = e1000_check_polarity_m88;
401 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
402 phy->ops.get_cable_length = e1000_get_cable_length_m88;
403 phy->ops.get_info = e1000_get_phy_info_m88;
406 ret_val = -E1000_ERR_PHY;
414 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
415 * @hw: pointer to the HW structure
417 * Initialize family-specific PHY parameters and function pointers.
419 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
421 struct e1000_phy_info *phy = &hw->phy;
425 DEBUGFUNC("e1000_init_phy_params_ich8lan");
428 phy->reset_delay_us = 100;
430 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
431 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
432 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
433 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
434 phy->ops.read_reg = e1000_read_phy_reg_igp;
435 phy->ops.release = e1000_release_swflag_ich8lan;
436 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
437 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
438 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
439 phy->ops.write_reg = e1000_write_phy_reg_igp;
440 phy->ops.power_up = e1000_power_up_phy_copper;
441 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
444 * We may need to do this twice - once for IGP and if that fails,
445 * we'll set BM func pointers and try again
447 ret_val = e1000_determine_phy_address(hw);
449 phy->ops.write_reg = e1000_write_phy_reg_bm;
450 phy->ops.read_reg = e1000_read_phy_reg_bm;
451 ret_val = e1000_determine_phy_address(hw);
453 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
459 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
462 ret_val = e1000_get_phy_id(hw);
469 case IGP03E1000_E_PHY_ID:
470 phy->type = e1000_phy_igp_3;
471 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
472 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
473 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
474 phy->ops.get_info = e1000_get_phy_info_igp;
475 phy->ops.check_polarity = e1000_check_polarity_igp;
476 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
479 case IFE_PLUS_E_PHY_ID:
481 phy->type = e1000_phy_ife;
482 phy->autoneg_mask = E1000_ALL_NOT_GIG;
483 phy->ops.get_info = e1000_get_phy_info_ife;
484 phy->ops.check_polarity = e1000_check_polarity_ife;
485 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
487 case BME1000_E_PHY_ID:
488 phy->type = e1000_phy_bm;
489 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
490 phy->ops.read_reg = e1000_read_phy_reg_bm;
491 phy->ops.write_reg = e1000_write_phy_reg_bm;
492 phy->ops.commit = e1000_phy_sw_reset_generic;
493 phy->ops.get_info = e1000_get_phy_info_m88;
494 phy->ops.check_polarity = e1000_check_polarity_m88;
495 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
498 return -E1000_ERR_PHY;
502 return E1000_SUCCESS;
506 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
507 * @hw: pointer to the HW structure
509 * Initialize family-specific NVM parameters and function
512 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
514 struct e1000_nvm_info *nvm = &hw->nvm;
515 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
516 u32 gfpreg, sector_base_addr, sector_end_addr;
519 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
521 /* Can't read flash registers if the register set isn't mapped. */
522 if (!hw->flash_address) {
523 DEBUGOUT("ERROR: Flash registers not mapped\n");
524 return -E1000_ERR_CONFIG;
527 nvm->type = e1000_nvm_flash_sw;
529 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
532 * sector_X_addr is a "sector"-aligned address (4096 bytes)
533 * Add 1 to sector_end_addr since this sector is included in
536 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
537 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
539 /* flash_base_addr is byte-aligned */
540 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
543 * find total size of the NVM, then cut in half since the total
544 * size represents two separate NVM banks.
546 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
547 << FLASH_SECTOR_ADDR_SHIFT;
548 nvm->flash_bank_size /= 2;
549 /* Adjust to word count */
550 nvm->flash_bank_size /= sizeof(u16);
552 nvm->word_size = E1000_SHADOW_RAM_WORDS;
554 /* Clear shadow ram */
555 for (i = 0; i < nvm->word_size; i++) {
556 dev_spec->shadow_ram[i].modified = false;
557 dev_spec->shadow_ram[i].value = 0xFFFF;
560 E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
561 E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
563 /* Function Pointers */
564 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
565 nvm->ops.release = e1000_release_nvm_ich8lan;
566 nvm->ops.read = e1000_read_nvm_ich8lan;
567 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
568 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
569 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
570 nvm->ops.write = e1000_write_nvm_ich8lan;
572 return E1000_SUCCESS;
576 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
577 * @hw: pointer to the HW structure
579 * Initialize family-specific MAC parameters and function
582 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
584 struct e1000_mac_info *mac = &hw->mac;
586 DEBUGFUNC("e1000_init_mac_params_ich8lan");
588 /* Set media type function pointer */
589 hw->phy.media_type = e1000_media_type_copper;
591 /* Set mta register count */
592 mac->mta_reg_count = 32;
593 /* Set rar entry count */
594 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
595 if (mac->type == e1000_ich8lan)
596 mac->rar_entry_count--;
597 /* Set if part includes ASF firmware */
598 mac->asf_firmware_present = true;
600 mac->has_fwsm = true;
601 /* ARC subsystem not supported */
602 mac->arc_subsystem_valid = false;
603 /* Adaptive IFS supported */
604 mac->adaptive_ifs = true;
606 /* Function pointers */
608 /* bus type/speed/width */
609 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
611 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
613 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
614 /* hw initialization */
615 mac->ops.init_hw = e1000_init_hw_ich8lan;
617 mac->ops.setup_link = e1000_setup_link_ich8lan;
618 /* physical interface setup */
619 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
621 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
623 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
624 /* multicast address update */
625 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
626 /* clear hardware counters */
627 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
629 /* LED and other operations */
634 /* check management mode */
635 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
637 mac->ops.id_led_init = e1000_id_led_init_generic;
639 mac->ops.blink_led = e1000_blink_led_generic;
641 mac->ops.setup_led = e1000_setup_led_generic;
643 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
644 /* turn on/off LED */
645 mac->ops.led_on = e1000_led_on_ich8lan;
646 mac->ops.led_off = e1000_led_off_ich8lan;
649 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
650 mac->ops.rar_set = e1000_rar_set_pch2lan;
653 /* check management mode */
654 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
656 mac->ops.id_led_init = e1000_id_led_init_pchlan;
658 mac->ops.setup_led = e1000_setup_led_pchlan;
660 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
661 /* turn on/off LED */
662 mac->ops.led_on = e1000_led_on_pchlan;
663 mac->ops.led_off = e1000_led_off_pchlan;
669 /* Enable PCS Lock-loss workaround for ICH8 */
670 if (mac->type == e1000_ich8lan)
671 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
673 /* Gate automatic PHY configuration by hardware on managed 82579 */
674 if ((mac->type == e1000_pch2lan) &&
675 (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
676 e1000_gate_hw_phy_config_ich8lan(hw, true);
678 return E1000_SUCCESS;
682 * __e1000_access_emi_reg_locked - Read/write EMI register
683 * @hw: pointer to the HW structure
684 * @addr: EMI address to program
685 * @data: pointer to value to read/write from/to the EMI address
686 * @read: boolean flag to indicate read or write
688 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
690 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
691 u16 *data, bool read)
693 s32 ret_val = E1000_SUCCESS;
695 DEBUGFUNC("__e1000_access_emi_reg_locked");
697 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
702 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
705 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
712 * e1000_read_emi_reg_locked - Read Extended Management Interface register
713 * @hw: pointer to the HW structure
714 * @addr: EMI address to program
715 * @data: value to be read from the EMI address
717 * Assumes the SW/FW/HW Semaphore is already acquired.
719 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
721 DEBUGFUNC("e1000_read_emi_reg_locked");
723 return __e1000_access_emi_reg_locked(hw, addr, data, true);
727 * e1000_write_emi_reg_locked - Write Extended Management Interface register
728 * @hw: pointer to the HW structure
729 * @addr: EMI address to program
730 * @data: value to be written to the EMI address
732 * Assumes the SW/FW/HW Semaphore is already acquired.
734 STATIC s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
736 DEBUGFUNC("e1000_read_emi_reg_locked");
738 return __e1000_access_emi_reg_locked(hw, addr, &data, false);
742 * e1000_set_eee_pchlan - Enable/disable EEE support
743 * @hw: pointer to the HW structure
745 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
746 * the link and the EEE capabilities of the link partner. The LPI Control
747 * register bits will remain set only if/when link is up.
749 STATIC s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
751 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
755 DEBUGFUNC("e1000_set_eee_pchlan");
757 if ((hw->phy.type != e1000_phy_82579) &&
758 (hw->phy.type != e1000_phy_i217))
759 return E1000_SUCCESS;
761 ret_val = hw->phy.ops.acquire(hw);
765 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
769 /* Clear bits that enable EEE in various speeds */
770 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
772 /* Enable EEE if not disabled by user */
773 if (!dev_spec->eee_disable) {
774 u16 lpa, pcs_status, data;
776 /* Save off link partner's EEE ability */
777 switch (hw->phy.type) {
778 case e1000_phy_82579:
779 lpa = I82579_EEE_LP_ABILITY;
780 pcs_status = I82579_EEE_PCS_STATUS;
783 lpa = I217_EEE_LP_ABILITY;
784 pcs_status = I217_EEE_PCS_STATUS;
787 ret_val = -E1000_ERR_PHY;
790 ret_val = e1000_read_emi_reg_locked(hw, lpa,
791 &dev_spec->eee_lp_ability);
796 * Enable EEE only for speeds in which the link partner is
799 if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
800 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
802 if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
803 hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
804 if (data & NWAY_LPAR_100TX_FD_CAPS)
805 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
808 * EEE is not supported in 100Half, so ignore
809 * partner's EEE in 100 ability if full-duplex
812 dev_spec->eee_lp_ability &=
813 ~I82579_EEE_100_SUPPORTED;
816 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
817 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
822 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
824 hw->phy.ops.release(hw);
830 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
831 * @hw: pointer to the HW structure
833 * Checks to see of the link status of the hardware has changed. If a
834 * change in link status has been detected, then we read the PHY registers
835 * to get the current speed/duplex if link exists.
837 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
839 struct e1000_mac_info *mac = &hw->mac;
844 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
847 * We only want to go out to the PHY registers to see if Auto-Neg
848 * has completed and/or if our link status has changed. The
849 * get_link_status flag is set upon receiving a Link Status
850 * Change or Rx Sequence Error interrupt.
852 if (!mac->get_link_status)
853 return E1000_SUCCESS;
856 * First we want to see if the MII Status Register reports
857 * link. If so, then we want to get the current speed/duplex
860 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
864 if (hw->mac.type == e1000_pchlan) {
865 ret_val = e1000_k1_gig_workaround_hv(hw, link);
870 /* Clear link partner's EEE ability */
871 hw->dev_spec.ich8lan.eee_lp_ability = 0;
874 return E1000_SUCCESS; /* No link detected */
876 mac->get_link_status = false;
878 switch (hw->mac.type) {
880 ret_val = e1000_k1_workaround_lv(hw);
885 if (hw->phy.type == e1000_phy_82578) {
886 ret_val = e1000_link_stall_workaround_hv(hw);
892 * Workaround for PCHx parts in half-duplex:
893 * Set the number of preambles removed from the packet
894 * when it is passed from the PHY to the MAC to prevent
895 * the MAC from misinterpreting the packet type.
897 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
898 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
900 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
902 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
904 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
911 * Check if there was DownShift, must be checked
912 * immediately after link-up
914 e1000_check_downshift_generic(hw);
916 /* Enable/Disable EEE after link up */
917 ret_val = e1000_set_eee_pchlan(hw);
922 * If we are forcing speed/duplex, then we simply return since
923 * we have already determined whether we have link or not.
926 return -E1000_ERR_CONFIG;
929 * Auto-Neg is enabled. Auto Speed Detection takes care
930 * of MAC speed/duplex configuration. So we only need to
931 * configure Collision Distance in the MAC.
933 mac->ops.config_collision_dist(hw);
936 * Configure Flow Control now that Auto-Neg has completed.
937 * First, we need to restore the desired flow control
938 * settings because we may have had to re-autoneg with a
939 * different link partner.
941 ret_val = e1000_config_fc_after_link_up_generic(hw);
943 DEBUGOUT("Error configuring flow control\n");
949 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
950 * @hw: pointer to the HW structure
952 * Initialize family-specific function pointers for PHY, MAC, and NVM.
954 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
956 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
958 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
959 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
960 switch (hw->mac.type) {
964 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
968 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
976 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
977 * @hw: pointer to the HW structure
979 * Acquires the mutex for performing NVM operations.
981 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
983 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
985 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
987 return E1000_SUCCESS;
991 * e1000_release_nvm_ich8lan - Release NVM mutex
992 * @hw: pointer to the HW structure
994 * Releases the mutex used while performing NVM operations.
996 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
998 DEBUGFUNC("e1000_release_nvm_ich8lan");
1000 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1006 * e1000_acquire_swflag_ich8lan - Acquire software control flag
1007 * @hw: pointer to the HW structure
1009 * Acquires the software control flag for performing PHY and select
1012 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1014 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1015 s32 ret_val = E1000_SUCCESS;
1017 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1019 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1022 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1023 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1031 DEBUGOUT("SW has already locked the resource.\n");
1032 ret_val = -E1000_ERR_CONFIG;
1036 timeout = SW_FLAG_TIMEOUT;
1038 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1039 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1042 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1043 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1051 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1052 E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1053 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1054 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1055 ret_val = -E1000_ERR_CONFIG;
1061 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1067 * e1000_release_swflag_ich8lan - Release software control flag
1068 * @hw: pointer to the HW structure
1070 * Releases the software control flag for performing PHY and select
1073 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1077 DEBUGFUNC("e1000_release_swflag_ich8lan");
1079 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1081 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1082 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1083 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1085 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1088 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1094 * e1000_check_mng_mode_ich8lan - Checks management mode
1095 * @hw: pointer to the HW structure
1097 * This checks if the adapter has any manageability enabled.
1098 * This is a function pointer entry point only called by read/write
1099 * routines for the PHY and NVM parts.
1101 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1105 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1107 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1109 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1110 ((fwsm & E1000_FWSM_MODE_MASK) ==
1111 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
/* NOTE(review): elided listing; declarations/braces missing from this view. */
1115 * e1000_check_mng_mode_pchlan - Checks management mode
1116 * @hw: pointer to the HW structure
1118 * This checks if the adapter has iAMT enabled.
1119 * This is a function pointer entry point only called by read/write
1120 * routines for the PHY and NVM parts.
1122 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1126 DEBUGFUNC("e1000_check_mng_mode_pchlan");
1128 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* Unlike the ICH8 variant above, this tests only the iAMT mode bit,
 * not the full mode-field equality. */
1130 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1131 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
/* NOTE(review): elided listing -- several statements (ret_val declaration,
 * index==0 early path, error/exit labels) are absent from this view. */
1135 * e1000_rar_set_pch2lan - Set receive address register
1136 * @hw: pointer to the HW structure
1137 * @addr: pointer to the receive address
1138 * @index: receive address array register
1140 * Sets the receive address array register at index to the address passed
1141 * in by addr. For 82579, RAR[0] is the base address register that is to
1142 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1143 * Use SHRA[0-3] in place of those reserved for ME.
1145 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1147 u32 rar_low, rar_high;
1149 DEBUGFUNC("e1000_rar_set_pch2lan");
1152 * HW expects these in little endian so we reverse the byte order
1153 * from network order (big endian) to little endian
1155 rar_low = ((u32) addr[0] |
1156 ((u32) addr[1] << 8) |
1157 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1159 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1161 /* If MAC address zero, no need to set the AV bit */
1162 if (rar_low || rar_high)
1163 rar_high |= E1000_RAH_AV;
/* Write RAL before RAH; flushing between the two keeps the address
 * update ordered as the hardware expects. */
1166 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1167 E1000_WRITE_FLUSH(hw);
1168 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1169 E1000_WRITE_FLUSH(hw);
/* Non-zero indexes map onto SHRA[index-1]; access requires the
 * software control flag. */
1173 if (index < hw->mac.rar_entry_count) {
1176 ret_val = e1000_acquire_swflag_ich8lan(hw);
1180 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1181 E1000_WRITE_FLUSH(hw);
1182 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1183 E1000_WRITE_FLUSH(hw);
1185 e1000_release_swflag_ich8lan(hw);
1187 /* verify the register updates */
1188 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1189 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
/* Readback mismatch: ME may own this SHRA slot. */
1192 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1193 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1197 DEBUGOUT1("Failed to write receive address at index %d\n", index);
/* NOTE(review): elided listing; declarations/braces missing from this view. */
1201 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1202 * @hw: pointer to the HW structure
1204 * Checks if firmware is blocking the reset of the PHY.
1205 * This is a function pointer entry point only called by
1208 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1212 DEBUGFUNC("e1000_check_reset_block_ich8lan");
1214 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* RSPCIPHY set means firmware permits a PHY reset. */
1216 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1217 : E1000_BLK_PHY_RESET;
/* NOTE(review): elided listing -- the phy_data declaration, error checks
 * after the PHY read, and an else-branch are absent from this view. */
1221 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1222 * @hw: pointer to the HW structure
1224 * Assumes semaphore already acquired.
1227 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
/* SMBus address and frequency are taken from the STRAP register. */
1230 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1231 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1232 E1000_STRAP_SMT_FREQ_SHIFT;
1233 s32 ret_val = E1000_SUCCESS;
1235 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1237 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1241 phy_data &= ~HV_SMB_ADDR_MASK;
1242 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1243 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1245 if (hw->phy.type == e1000_phy_i217) {
1246 /* Restore SMBus frequency */
/* Two strap frequency bits are split into the PHY's low/high
 * frequency bit positions. */
1248 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1249 phy_data |= (freq & (1 << 0)) <<
1250 HV_SMB_ADDR_FREQ_LOW_SHIFT;
1251 phy_data |= (freq & (1 << 1)) <<
1252 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
/* Branch condition elided in this view; reached on unsupported freq. */
1254 DEBUGOUT("Unsupported SMB frequency in PHY\n");
1258 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
/* NOTE(review): elided listing -- switch case labels, error-exit jumps and
 * the release/out paths are absent from this view. */
1262 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1263 * @hw: pointer to the HW structure
1265 * SW should configure the LCD from the NVM extended configuration region
1266 * as a workaround for certain parts.
1268 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1270 struct e1000_phy_info *phy = &hw->phy;
1271 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1272 s32 ret_val = E1000_SUCCESS;
1273 u16 word_addr, reg_data, reg_addr, phy_page = 0;
1275 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1278 * Initialize the PHY from the NVM on ICH platforms. This
1279 * is needed due to an issue where the NVM configuration is
1280 * not properly autoloaded after power transitions.
1281 * Therefore, after each PHY reset, we will load the
1282 * configuration data out of the NVM manually.
1284 switch (hw->mac.type) {
/* case labels elided in this view */
1286 if (phy->type != e1000_phy_igp_3)
1289 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1290 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1291 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1297 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1303 ret_val = hw->phy.ops.acquire(hw);
/* Bail out unless the NVM says SW should perform the configuration. */
1307 data = E1000_READ_REG(hw, E1000_FEXTNVM);
1308 if (!(data & sw_cfg_mask))
1312 * Make sure HW does not configure LCD from PHY
1313 * extended configuration before SW configuration
1315 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1316 if ((hw->mac.type < e1000_pch2lan) &&
1317 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1320 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1321 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1322 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1326 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1327 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1329 if (((hw->mac.type == e1000_pchlan) &&
1330 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1331 (hw->mac.type > e1000_pchlan)) {
1333 * HW configures the SMBus address and LEDs when the
1334 * OEM and LCD Write Enable bits are set in the NVM.
1335 * When both NVM bits are cleared, SW will configure
1338 ret_val = e1000_write_smbus_addr(hw);
1342 data = E1000_READ_REG(hw, E1000_LEDCTL);
1343 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1349 /* Configure LCD from extended configuration region. */
1351 /* cnf_base_addr is in DWORD */
1352 word_addr = (u16)(cnf_base_addr << 1);
/* Each config entry is two NVM words: data then register address. */
1354 for (i = 0; i < cnf_size; i++) {
1355 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1360 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1365 /* Save off the PHY page for future writes. */
1366 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1367 phy_page = reg_data;
1371 reg_addr &= PHY_REG_MASK;
1372 reg_addr |= phy_page;
1374 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1381 hw->phy.ops.release(hw);
/* NOTE(review): elided listing -- k1_enable assignments, error jumps and
 * the release label are absent from this view. */
1386 * e1000_k1_gig_workaround_hv - K1 Si workaround
1387 * @hw: pointer to the HW structure
1388 * @link: link up bool flag
1390 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1391 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
1392 * If link is down, the function will restore the default K1 setting located
1395 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1397 s32 ret_val = E1000_SUCCESS;
1399 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1401 DEBUGFUNC("e1000_k1_gig_workaround_hv");
/* Workaround only applies to the original PCH (82577/82578) MAC. */
1403 if (hw->mac.type != e1000_pchlan)
1404 return E1000_SUCCESS;
1406 /* Wrap the whole flow with the sw flag */
1407 ret_val = hw->phy.ops.acquire(hw);
1411 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1413 if (hw->phy.type == e1000_phy_82578) {
1414 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1419 status_reg &= BM_CS_STATUS_LINK_UP |
1420 BM_CS_STATUS_RESOLVED |
1421 BM_CS_STATUS_SPEED_MASK;
1423 if (status_reg == (BM_CS_STATUS_LINK_UP |
1424 BM_CS_STATUS_RESOLVED |
1425 BM_CS_STATUS_SPEED_1000))
/* body elided in this view (presumably k1_enable = false) -- confirm */
1429 if (hw->phy.type == e1000_phy_82577) {
1430 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1435 status_reg &= HV_M_STATUS_LINK_UP |
1436 HV_M_STATUS_AUTONEG_COMPLETE |
1437 HV_M_STATUS_SPEED_MASK;
1439 if (status_reg == (HV_M_STATUS_LINK_UP |
1440 HV_M_STATUS_AUTONEG_COMPLETE |
1441 HV_M_STATUS_SPEED_1000))
1445 /* Link stall fix for link up */
1446 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1452 /* Link stall fix for link down */
1453 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1459 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1462 hw->phy.ops.release(hw);
/* NOTE(review): elided listing -- kmrn_reg/ctrl declarations, the
 * enable/disable if-else keywords and usec_delay calls are absent here. */
1468 * e1000_configure_k1_ich8lan - Configure K1 power state
1469 * @hw: pointer to the HW structure
1470 * @enable: K1 state to configure
1472 * Configure the K1 power state based on the provided parameter.
1473 * Assumes semaphore already acquired.
1475 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1477 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1479 s32 ret_val = E1000_SUCCESS;
1485 DEBUGFUNC("e1000_configure_k1_ich8lan");
1487 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
/* Set or clear the K1 enable bit per k1_enable (branch keywords elided). */
1493 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1495 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1497 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
/* Briefly force speed/bypass so the K1 change takes effect, then
 * restore the original CTRL/CTRL_EXT values. */
1503 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1504 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1506 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1507 reg |= E1000_CTRL_FRCSPD;
1508 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1510 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1511 E1000_WRITE_FLUSH(hw);
1513 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1514 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1515 E1000_WRITE_FLUSH(hw);
1518 return E1000_SUCCESS;
/* NOTE(review): elided listing -- mac_reg/oem_reg declarations, early
 * returns and the d0_state if/else keywords are absent from this view. */
1522 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1523 * @hw: pointer to the HW structure
1524 * @d0_state: boolean if entering d0 or d3 device state
1526 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1527 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1528 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1530 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1536 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
1538 if (hw->mac.type < e1000_pchlan)
1541 ret_val = hw->phy.ops.acquire(hw);
/* On PCH, skip SW config when HW is allowed to write the OEM bits or
 * the NVM SW-config bit is clear. */
1545 if (hw->mac.type == e1000_pchlan) {
1546 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1547 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1551 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1552 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1555 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1557 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1561 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
/* D0 path: mirror the D0 LPLU / GbE-disable bits from PHY_CTRL. */
1564 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1565 oem_reg |= HV_OEM_BITS_GBE_DIS;
1567 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1568 oem_reg |= HV_OEM_BITS_LPLU;
/* D3 path: either the D0 or non-D0 bit triggers the OEM setting. */
1570 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1571 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1572 oem_reg |= HV_OEM_BITS_GBE_DIS;
1574 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1575 E1000_PHY_CTRL_NOND0A_LPLU))
1576 oem_reg |= HV_OEM_BITS_LPLU;
1579 /* Set Restart auto-neg to activate the bits */
1580 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1581 !hw->phy.ops.check_reset_block(hw))
1582 oem_reg |= HV_OEM_BITS_RESTART_AN;
1584 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1587 hw->phy.ops.release(hw);
/* NOTE(review): elided listing; data/ret_val declarations and error
 * checks are absent from this view. */
1594 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1595 * @hw: pointer to the HW structure
1597 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1602 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
1604 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
/* Read-modify-write: turn on the MDIO slow-mode bit only. */
1608 data |= HV_KMRN_MDIO_SLOW;
1610 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
/* NOTE(review): elided listing -- error checks after each ret_val
 * assignment and the release label are absent from this view. */
1616 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1617 * done after every PHY reset.
1619 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1621 s32 ret_val = E1000_SUCCESS;
1624 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
1626 if (hw->mac.type != e1000_pchlan)
1627 return E1000_SUCCESS;
1629 /* Set MDIO slow mode before any other MDIO access */
1630 if (hw->phy.type == e1000_phy_82577) {
1631 ret_val = e1000_set_mdio_slow_mode_hv(hw);
/* Early-silicon revisions (82577 A/B, 82578 A) need preamble tweaks. */
1636 if (((hw->phy.type == e1000_phy_82577) &&
1637 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1638 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1639 /* Disable generation of early preamble */
1640 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1644 /* Preamble tuning for SSC */
1645 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
1651 if (hw->phy.type == e1000_phy_82578) {
1653 * Return registers to default by doing a soft reset then
1654 * writing 0x3140 to the control register.
1656 if (hw->phy.revision < 2) {
1657 e1000_phy_sw_reset_generic(hw);
1658 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
1664 ret_val = hw->phy.ops.acquire(hw);
/* Select page 0 via a raw MDIC write while holding the semaphore. */
1669 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1670 hw->phy.ops.release(hw);
1675 * Configure the K1 Si workaround during phy reset assuming there is
1676 * link so that it disables K1 if link is in 1Gbps.
1678 ret_val = e1000_k1_gig_workaround_hv(hw, true);
1682 /* Workaround for link disconnects on a busy hub in half duplex */
1683 ret_val = hw->phy.ops.acquire(hw);
1686 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1689 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
1694 /* set MSE higher to enable link to stay up when noise is high */
1695 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
1697 hw->phy.ops.release(hw);
/* NOTE(review): elided listing -- local declarations, error jumps and
 * the release label are absent from this view. */
1703 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1704 * @hw: pointer to the HW structure
1706 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1712 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1714 ret_val = hw->phy.ops.acquire(hw);
1717 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1721 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1722 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
/* Each 32-bit MAC register is split into two 16-bit PHY writes. */
1723 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1724 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1725 (u16)(mac_reg & 0xFFFF));
1726 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1727 (u16)((mac_reg >> 16) & 0xFFFF));
1729 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1730 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1731 (u16)(mac_reg & 0xFFFF));
1732 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1733 (u16)((mac_reg & E1000_RAH_AV)
1737 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1740 hw->phy.ops.release(hw);
/* Computes a CRC-32 (reflected polynomial 0xEDB88320, as used by 802.3)
 * over a 6-byte MAC address.  NOTE(review): elided listing -- the crc
 * initialization, the per-byte XOR-in and the return are absent here. */
1743 static u32 e1000_calc_rx_da_crc(u8 mac[])
1745 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
1746 u32 i, j, mask, crc;
1748 DEBUGFUNC("e1000_calc_rx_da_crc");
1751 for (i = 0; i < 6; i++) {
1753 for (j = 8; j > 0; j--) {
/* mask is all-ones when the low bit is set, zero otherwise:
 * branch-free conditional XOR with the polynomial. */
1754 mask = (crc & 1) * (-1);
1755 crc = (crc >> 1) ^ (poly & mask);
/* NOTE(review): elided listing -- local declarations, the enable/disable
 * if-else keywords, error jumps and some data-modification lines are
 * absent from this view. */
1762 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1764 * @hw: pointer to the HW structure
1765 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1767 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1769 s32 ret_val = E1000_SUCCESS;
1774 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
/* 82579 (pch2lan) only. */
1776 if (hw->mac.type != e1000_pch2lan)
1777 return E1000_SUCCESS;
1779 /* disable Rx path while enabling/disabling workaround */
1780 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1781 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
1782 phy_reg | (1 << 14));
1788 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1789 * SHRAL/H) and initial CRC values to the MAC
1791 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1792 u8 mac_addr[ETH_ADDR_LEN] = {0};
1793 u32 addr_high, addr_low;
/* Skip entries whose Address Valid bit is clear. */
1795 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1796 if (!(addr_high & E1000_RAH_AV))
1798 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
1799 mac_addr[0] = (addr_low & 0xFF);
1800 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1801 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1802 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1803 mac_addr[4] = (addr_high & 0xFF);
1804 mac_addr[5] = ((addr_high >> 8) & 0xFF);
1806 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1807 e1000_calc_rx_da_crc(mac_addr));
1810 /* Write Rx addresses to the PHY */
1811 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1813 /* Enable jumbo frame workaround in the MAC */
1814 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1815 mac_reg &= ~(1 << 14);
1816 mac_reg |= (7 << 15);
1817 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
/* Strip the Ethernet CRC while the workaround is active. */
1819 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1820 mac_reg |= E1000_RCTL_SECRC;
1821 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1823 ret_val = e1000_read_kmrn_reg_generic(hw,
1824 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1828 ret_val = e1000_write_kmrn_reg_generic(hw,
1829 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1833 ret_val = e1000_read_kmrn_reg_generic(hw,
1834 E1000_KMRNCTRLSTA_HD_CTRL,
1838 data &= ~(0xF << 8);
1840 ret_val = e1000_write_kmrn_reg_generic(hw,
1841 E1000_KMRNCTRLSTA_HD_CTRL,
1846 /* Enable jumbo frame workaround in the PHY */
1847 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1848 data &= ~(0x7F << 5);
1849 data |= (0x37 << 5);
1850 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1853 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1855 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1858 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1859 data &= ~(0x3FF << 2);
1860 data |= (0x1A << 2);
1861 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1864 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
1867 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1868 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
/* Disable path (reached when 'enable' is false -- keyword elided):
 * restore MAC and PHY registers to hardware defaults. */
1873 /* Write MAC register values back to h/w defaults */
1874 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1875 mac_reg &= ~(0xF << 14);
1876 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1878 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1879 mac_reg &= ~E1000_RCTL_SECRC;
1880 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1882 ret_val = e1000_read_kmrn_reg_generic(hw,
1883 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1887 ret_val = e1000_write_kmrn_reg_generic(hw,
1888 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1892 ret_val = e1000_read_kmrn_reg_generic(hw,
1893 E1000_KMRNCTRLSTA_HD_CTRL,
1897 data &= ~(0xF << 8);
1899 ret_val = e1000_write_kmrn_reg_generic(hw,
1900 E1000_KMRNCTRLSTA_HD_CTRL,
1905 /* Write PHY register values back to h/w defaults */
1906 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1907 data &= ~(0x7F << 5);
1908 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1911 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1913 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1916 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1917 data &= ~(0x3FF << 2);
1919 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1922 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1925 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1926 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
1932 /* re-enable Rx path after enabling/disabling workaround */
1933 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
/* NOTE(review): elided listing -- error checks and the release label are
 * absent from this view. */
1938 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1939 * done after every PHY reset.
1941 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1943 s32 ret_val = E1000_SUCCESS;
1945 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
/* 82579 (pch2lan) only. */
1947 if (hw->mac.type != e1000_pch2lan)
1948 return E1000_SUCCESS;
1950 /* Set MDIO slow mode before any other MDIO access */
1951 ret_val = e1000_set_mdio_slow_mode_hv(hw)
1955 ret_val = hw->phy.ops.acquire(hw);
1958 /* set MSE higher to enable link to stay up when noise is high */
1959 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
1962 /* drop link after 5 times MSE threshold was reached */
1963 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
1965 hw->phy.ops.release(hw);
/* NOTE(review): elided listing -- declarations, error checks and the else
 * keyword before the 16-usec branch are absent from this view. */
1971 * e1000_k1_gig_workaround_lv - K1 Si workaround
1972 * @hw: pointer to the HW structure
1974 * Workaround to set the K1 beacon duration for 82579 parts
1976 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1978 s32 ret_val = E1000_SUCCESS;
1983 DEBUGFUNC("e1000_k1_workaround_lv");
1985 if (hw->mac.type != e1000_pch2lan)
1986 return E1000_SUCCESS;
1988 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1989 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
/* Only act once link is up and autoneg has completed. */
1993 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1994 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1995 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1996 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1998 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
/* 1 Gbps: 8-usec beacon, no forced PLL lock count, and keep the
 * PLL running in K1 (packet-drop workaround). */
2002 if (status_reg & HV_M_STATUS_SPEED_1000) {
2005 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
2006 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2007 /* LV 1G Packet drop issue wa */
2008 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2012 pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2013 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
/* Non-gigabit path (else keyword elided): 16-usec beacon. */
2018 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2019 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2021 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2022 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
/* NOTE(review): elided listing; extcnf_ctrl declaration, early return and
 * if/else keywords are absent from this view. */
2029 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2030 * @hw: pointer to the HW structure
2031 * @gate: boolean set to true to gate, false to ungate
2033 * Gate/ungate the automatic PHY configuration via hardware; perform
2034 * the configuration via software instead.
2036 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2040 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
/* 82579 (pch2lan) only. */
2042 if (hw->mac.type != e1000_pch2lan)
2045 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2048 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2050 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2052 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
/* NOTE(review): elided listing -- the do keyword, usec_delay and the
 * if (!loop) guard before the DEBUGOUT are absent from this view. */
2056 * e1000_lan_init_done_ich8lan - Check for PHY config completion
2057 * @hw: pointer to the HW structure
2059 * Check the appropriate indication the MAC has finished configuring the
2060 * PHY after a software reset.
2062 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2064 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2066 DEBUGFUNC("e1000_lan_init_done_ich8lan");
2068 /* Wait for basic configuration completes before proceeding */
/* Poll STATUS.LAN_INIT_DONE until set or the loop counter expires. */
2070 data = E1000_READ_REG(hw, E1000_STATUS);
2071 data &= E1000_STATUS_LAN_INIT_DONE;
2073 } while ((!data) && --loop);
2076 * If basic configuration is incomplete before the above loop
2077 * count reaches 0, loading the configuration from NVM will
2078 * leave the PHY in a bad state possibly resulting in no link.
2081 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2083 /* Clear the Init Done bit for the next init event */
2084 data = E1000_READ_REG(hw, E1000_STATUS);
2085 data &= ~E1000_STATUS_LAN_INIT_DONE;
2086 E1000_WRITE_REG(hw, E1000_STATUS, data);
/* NOTE(review): elided listing -- switch case labels, msec_sleep, error
 * checks and the release label are absent from this view. */
2090 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2091 * @hw: pointer to the HW structure
2093 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2095 s32 ret_val = E1000_SUCCESS;
2098 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
/* Nothing to do if firmware blocks PHY resets. */
2100 if (hw->phy.ops.check_reset_block(hw))
2101 return E1000_SUCCESS;
2103 /* Allow time for h/w to get to quiescent state after reset */
2106 /* Perform any necessary post-reset workarounds */
2107 switch (hw->mac.type) {
/* pchlan case (label elided) */
2109 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
/* pch2lan case (label elided) */
2114 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2122 /* Clear the host wakeup bit after lcd reset */
2123 if (hw->mac.type >= e1000_pchlan) {
2124 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2125 reg &= ~BM_WUC_HOST_WU_BIT;
2126 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2129 /* Configure the LCD with the extended configuration region in NVM */
2130 ret_val = e1000_sw_lcd_config_ich8lan(hw);
2134 /* Configure the LCD with the OEM bits in NVM */
2135 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2137 if (hw->mac.type == e1000_pch2lan) {
2138 /* Ungate automatic PHY configuration on non-managed 82579 */
2139 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2140 E1000_ICH_FWSM_FW_VALID)) {
2142 e1000_gate_hw_phy_config_ich8lan(hw, false);
2145 /* Set EEE LPI Update Timer to 200usec */
2146 ret_val = hw->phy.ops.acquire(hw);
2149 ret_val = e1000_write_emi_reg_locked(hw,
2150 I82579_LPI_UPDATE_TIMER,
2152 hw->phy.ops.release(hw);
/* NOTE(review): elided listing; the error check after the generic reset
 * is absent from this view. */
2159 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2160 * @hw: pointer to the HW structure
2163 * This is a function pointer entry point called by drivers
2164 * or other shared routines.
2166 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2168 s32 ret_val = E1000_SUCCESS;
2170 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2172 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2173 if ((hw->mac.type == e1000_pch2lan) &&
2174 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2175 e1000_gate_hw_phy_config_ich8lan(hw, true);
2177 ret_val = e1000_phy_hw_reset_generic(hw);
/* Generic reset succeeded; run the ICH/PCH-specific post-reset steps. */
2181 return e1000_post_phy_reset_ich8lan(hw);
/* NOTE(review): elided listing; oem_reg declaration, error check and
 * if/else keywords are absent from this view. */
2185 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2186 * @hw: pointer to the HW structure
2187 * @active: true to enable LPLU, false to disable
2189 * Sets the LPLU state according to the active flag. For PCH, if OEM write
2190 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2191 * the phy speed. This function will manually set the LPLU bit and restart
2192 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
2193 * since it configures the same bit.
2195 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2197 s32 ret_val = E1000_SUCCESS;
2200 DEBUGFUNC("e1000_set_lplu_state_pchlan");
2202 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2207 oem_reg |= HV_OEM_BITS_LPLU;
2209 oem_reg &= ~HV_OEM_BITS_LPLU;
/* Restart autoneg so the new LPLU setting takes effect, unless FW
 * is blocking PHY resets. */
2211 if (!hw->phy.ops.check_reset_block(hw))
2212 oem_reg |= HV_OEM_BITS_RESTART_AN;
2214 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
/* NOTE(review): elided listing -- the 'data' declaration, if (active)
 * / else keywords and several error checks are absent from this view. */
2218 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2219 * @hw: pointer to the HW structure
2220 * @active: true to enable LPLU, false to disable
2222 * Sets the LPLU D0 state according to the active flag. When
2223 * activating LPLU this function also disables smart speed
2224 * and vice versa. LPLU will not be activated unless the
2225 * device autonegotiation advertisement meets standards of
2226 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2227 * This is a function pointer entry point only called by
2228 * PHY setup routines.
2230 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2232 struct e1000_phy_info *phy = &hw->phy;
2234 s32 ret_val = E1000_SUCCESS;
2237 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
/* IFE PHYs have no D0 LPLU support in this path. */
2239 if (phy->type == e1000_phy_ife)
2240 return E1000_SUCCESS;
2242 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Activation path (if (active) keyword elided). */
2245 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2246 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2248 if (phy->type != e1000_phy_igp_3)
2249 return E1000_SUCCESS;
2252 * Call gig speed drop workaround on LPLU before accessing
2255 if (hw->mac.type == e1000_ich8lan)
2256 e1000_gig_downshift_workaround_ich8lan(hw);
2258 /* When LPLU is enabled, we should disable SmartSpeed */
2259 ret_val = phy->ops.read_reg(hw,
2260 IGP01E1000_PHY_PORT_CONFIG,
2262 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2263 ret_val = phy->ops.write_reg(hw,
2264 IGP01E1000_PHY_PORT_CONFIG,
/* Deactivation path (else keyword elided). */
2269 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2270 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2272 if (phy->type != e1000_phy_igp_3)
2273 return E1000_SUCCESS;
2276 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2277 * during Dx states where the power conservation is most
2278 * important. During driver activity we should enable
2279 * SmartSpeed, so performance is maintained.
2281 if (phy->smart_speed == e1000_smart_speed_on) {
2282 ret_val = phy->ops.read_reg(hw,
2283 IGP01E1000_PHY_PORT_CONFIG,
2288 data |= IGP01E1000_PSCFR_SMART_SPEED;
2289 ret_val = phy->ops.write_reg(hw,
2290 IGP01E1000_PHY_PORT_CONFIG,
2294 } else if (phy->smart_speed == e1000_smart_speed_off) {
2295 ret_val = phy->ops.read_reg(hw,
2296 IGP01E1000_PHY_PORT_CONFIG,
2301 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2302 ret_val = phy->ops.write_reg(hw,
2303 IGP01E1000_PHY_PORT_CONFIG,
2310 return E1000_SUCCESS;
/* NOTE(review): elided listing -- the 'data' declaration, the !active
 * branch keyword and several error checks are absent from this view. */
2314 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2315 * @hw: pointer to the HW structure
2316 * @active: true to enable LPLU, false to disable
2318 * Sets the LPLU D3 state according to the active flag. When
2319 * activating LPLU this function also disables smart speed
2320 * and vice versa. LPLU will not be activated unless the
2321 * device autonegotiation advertisement meets standards of
2322 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2323 * This is a function pointer entry point only called by
2324 * PHY setup routines.
2326 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2328 struct e1000_phy_info *phy = &hw->phy;
2330 s32 ret_val = E1000_SUCCESS;
2333 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2335 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Deactivation path (branch keyword elided): clear non-D0a LPLU. */
2338 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2339 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2341 if (phy->type != e1000_phy_igp_3)
2342 return E1000_SUCCESS;
2345 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2346 * during Dx states where the power conservation is most
2347 * important. During driver activity we should enable
2348 * SmartSpeed, so performance is maintained.
2350 if (phy->smart_speed == e1000_smart_speed_on) {
2351 ret_val = phy->ops.read_reg(hw,
2352 IGP01E1000_PHY_PORT_CONFIG,
2357 data |= IGP01E1000_PSCFR_SMART_SPEED;
2358 ret_val = phy->ops.write_reg(hw,
2359 IGP01E1000_PHY_PORT_CONFIG,
2363 } else if (phy->smart_speed == e1000_smart_speed_off) {
2364 ret_val = phy->ops.read_reg(hw,
2365 IGP01E1000_PHY_PORT_CONFIG,
2370 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2371 ret_val = phy->ops.write_reg(hw,
2372 IGP01E1000_PHY_PORT_CONFIG,
/* Activation path: only enable D3 LPLU when the advertisement allows
 * the required speed/duplex combinations. */
2377 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2378 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2379 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2380 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2381 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2383 if (phy->type != e1000_phy_igp_3)
2384 return E1000_SUCCESS;
2387 * Call gig speed drop workaround on LPLU before accessing
2390 if (hw->mac.type == e1000_ich8lan)
2391 e1000_gig_downshift_workaround_ich8lan(hw);
2393 /* When LPLU is enabled, we should disable SmartSpeed */
2394 ret_val = phy->ops.read_reg(hw,
2395 IGP01E1000_PHY_PORT_CONFIG,
2400 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2401 ret_val = phy->ops.write_reg(hw,
2402 IGP01E1000_PHY_PORT_CONFIG,
/* NOTE(review): elided listing -- case labels, *bank assignments and
 * error checks are absent from this view. */
2410 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2411 * @hw: pointer to the HW structure
2412 * @bank: pointer to the variable that returns the active bank
2414 * Reads signature byte from the NVM using the flash access registers.
2415 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2417 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2420 struct e1000_nvm_info *nvm = &hw->nvm;
/* Bank 1 starts one flash-bank-size (in bytes) past bank 0. */
2421 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2422 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2426 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2428 switch (hw->mac.type) {
/* case labels elided in this view */
2431 eecd = E1000_READ_REG(hw, E1000_EECD);
/* Prefer the EEC register's SEC1VAL indication when it is valid. */
2432 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2433 E1000_EECD_SEC1VAL_VALID_MASK) {
2434 if (eecd & E1000_EECD_SEC1VAL)
2439 return E1000_SUCCESS;
2441 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2444 /* set bank to 0 in case flash read fails */
/* Fall back to reading the signature byte of each bank directly. */
2448 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2452 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2453 E1000_ICH_NVM_SIG_VALUE) {
2455 return E1000_SUCCESS;
2459 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2464 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2465 E1000_ICH_NVM_SIG_VALUE) {
2467 return E1000_SUCCESS;
2470 DEBUGOUT("ERROR: No valid NVM bank present\n");
2471 return -E1000_ERR_NVM;
2476 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2477 * @hw: pointer to the HW structure
2478 * @offset: The offset (in bytes) of the word(s) to read.
2479 * @words: Size of data to read in words
2480 * @data: Pointer to the word(s) to read at offset.
2482 * Reads a word(s) from the NVM using the flash access registers.
2484 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2487 struct e1000_nvm_info *nvm = &hw->nvm;
2488 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2490 s32 ret_val = E1000_SUCCESS;
2494 DEBUGFUNC("e1000_read_nvm_ich8lan");
/* Reject reads that start or run past the end of the NVM word space */
2496 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2498 DEBUGOUT("nvm parameter(s) out of bounds\n");
2499 ret_val = -E1000_ERR_NVM;
2503 nvm->ops.acquire(hw);
2505 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2506 if (ret_val != E1000_SUCCESS) {
2507 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* Bank 1 lives flash_bank_size words past bank 0 */
2511 act_offset = (bank) ? nvm->flash_bank_size : 0;
2512 act_offset += offset;
2514 ret_val = E1000_SUCCESS;
2515 for (i = 0; i < words; i++) {
/* Words with pending (uncommitted) writes are served from the shadow ram */
2516 if (dev_spec->shadow_ram[offset+i].modified) {
2517 data[i] = dev_spec->shadow_ram[offset+i].value;
2519 ret_val = e1000_read_flash_word_ich8lan(hw,
2528 nvm->ops.release(hw);
2532 DEBUGOUT1("NVM read error: %d\n", ret_val);
2538 * e1000_flash_cycle_init_ich8lan - Initialize flash
2539 * @hw: pointer to the HW structure
2541 * This function does initial flash setup so that a new read/write/erase cycle
2544 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2546 union ich8_hws_flash_status hsfsts;
2547 s32 ret_val = -E1000_ERR_NVM;
2549 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2551 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2553 /* Check if the flash descriptor is valid */
2554 if (!hsfsts.hsf_status.fldesvalid) {
2555 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
2556 return -E1000_ERR_NVM;
2559 /* Clear FCERR and DAEL in hw status by writing 1 */
2560 hsfsts.hsf_status.flcerr = 1;
2561 hsfsts.hsf_status.dael = 1;
2563 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2566 * Either we should have a hardware SPI cycle in progress
2567 * bit to check against, in order to start a new cycle or
2568 * FDONE bit should be changed in the hardware so that it
2569 * is 1 after hardware reset, which can then be used as an
2570 * indication whether a cycle is in progress or has been
2574 if (!hsfsts.hsf_status.flcinprog) {
2576 * There is no cycle running at present,
2577 * so we can start a cycle.
2578 * Begin by setting Flash Cycle Done.
2580 hsfsts.hsf_status.flcdone = 1;
2581 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2582 ret_val = E1000_SUCCESS;
2587 * Otherwise poll for sometime so the current
2588 * cycle has a chance to end before giving up.
2590 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2591 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2593 if (!hsfsts.hsf_status.flcinprog) {
2594 ret_val = E1000_SUCCESS;
2599 if (ret_val == E1000_SUCCESS) {
2601 * Successful in waiting for previous cycle to timeout,
2602 * now set the Flash Cycle Done.
2604 hsfsts.hsf_status.flcdone = 1;
2605 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
/* Previous cycle never finished within the timeout - give up */
2608 DEBUGOUT("Flash controller busy, cannot get access\n");
2616 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2617 * @hw: pointer to the HW structure
2618 * @timeout: maximum time to wait for completion
2620 * This function starts a flash cycle and waits for its completion.
2622 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2624 union ich8_hws_flash_ctrl hsflctl;
2625 union ich8_hws_flash_status hsfsts;
2628 DEBUGFUNC("e1000_flash_cycle_ich8lan");
2630 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2631 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2632 hsflctl.hsf_ctrl.flcgo = 1;
2633 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2635 /* wait till FDONE bit is set to 1 */
2637 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2638 if (hsfsts.hsf_status.flcdone)
2641 } while (i++ < timeout);
/* Success only if the cycle completed without a flash cycle error */
2643 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
2644 return E1000_SUCCESS;
2646 return -E1000_ERR_NVM;
2650 * e1000_read_flash_word_ich8lan - Read word from flash
2651 * @hw: pointer to the HW structure
2652 * @offset: offset to data location
2653 * @data: pointer to the location for storing the data
2655 * Reads the flash word at offset into data. Offset is converted
2656 * to bytes before read.
2658 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2661 DEBUGFUNC("e1000_read_flash_word_ich8lan");
2664 return -E1000_ERR_NVM;
2666 /* Must convert offset into bytes. */
/* Delegate to the byte/word flash reader with size = 2 (one word) */
2669 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2673 * e1000_read_flash_byte_ich8lan - Read byte from flash
2674 * @hw: pointer to the HW structure
2675 * @offset: The offset of the byte to read.
2676 * @data: Pointer to a byte to store the value read.
2678 * Reads a single byte from the NVM using the flash access registers.
2680 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* Read a single byte via the common byte/word flash reader (size = 1) */
2686 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2692 return E1000_SUCCESS;
2696 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2697 * @hw: pointer to the HW structure
2698 * @offset: The offset (in bytes) of the byte or word to read.
2699 * @size: Size of data to read, 1=byte 2=word
2700 * @data: Pointer to the word to store the value read.
2702 * Reads a byte or word from the NVM using the flash access registers.
2704 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2707 union ich8_hws_flash_status hsfsts;
2708 union ich8_hws_flash_ctrl hsflctl;
2709 u32 flash_linear_addr;
2711 s32 ret_val = -E1000_ERR_NVM;
2714 DEBUGFUNC("e1000_read_flash_data_ich8lan");
/* Only 1- or 2-byte reads are supported by this path */
2716 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2717 return -E1000_ERR_NVM;
2719 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2720 hw->nvm.flash_base_addr;
2725 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2726 if (ret_val != E1000_SUCCESS)
2729 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2730 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2731 hsflctl.hsf_ctrl.fldbcount = size - 1;
2732 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2733 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2735 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2737 ret_val = e1000_flash_cycle_ich8lan(hw,
2738 ICH_FLASH_READ_COMMAND_TIMEOUT);
2741 * Check if FCERR is set to 1, if set to 1, clear it
2742 * and try the whole sequence a few more times, else
2743 * read in (shift in) the Flash Data0, the order is
2744 * least significant byte first msb to lsb
2746 if (ret_val == E1000_SUCCESS) {
2747 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
2749 *data = (u8)(flash_data & 0x000000FF);
2751 *data = (u16)(flash_data & 0x0000FFFF);
2755 * If we've gotten here, then things are probably
2756 * completely hosed, but if the error condition is
2757 * detected, it won't hurt to give it another try...
2758 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2760 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2762 if (hsfsts.hsf_status.flcerr) {
2763 /* Repeat for some time before giving up. */
2765 } else if (!hsfsts.hsf_status.flcdone) {
2766 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
2770 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2776 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2777 * @hw: pointer to the HW structure
2778 * @offset: The offset (in bytes) of the word(s) to write.
2779 * @words: Size of data to write in words
2780 * @data: Pointer to the word(s) to write at offset.
2782 * Writes a byte or word to the NVM using the flash access registers.
2784 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2787 struct e1000_nvm_info *nvm = &hw->nvm;
2788 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2791 DEBUGFUNC("e1000_write_nvm_ich8lan");
/* Reject writes that start or run past the end of the NVM word space */
2793 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2795 DEBUGOUT("nvm parameter(s) out of bounds\n");
2796 return -E1000_ERR_NVM;
2799 nvm->ops.acquire(hw);
/*
 * Writes only mark the shadow ram entries; the actual flash commit
 * happens later in e1000_update_nvm_checksum_ich8lan().
 */
2801 for (i = 0; i < words; i++) {
2802 dev_spec->shadow_ram[offset+i].modified = true;
2803 dev_spec->shadow_ram[offset+i].value = data[i];
2806 nvm->ops.release(hw);
2808 return E1000_SUCCESS;
2812 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2813 * @hw: pointer to the HW structure
2815 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2816 * which writes the checksum to the shadow ram. The changes in the shadow
2817 * ram are then committed to the EEPROM by processing each bank at a time
2818 * checking for the modified bit and writing only the pending changes.
2819 * After a successful commit, the shadow ram is cleared and is ready for
2822 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2824 struct e1000_nvm_info *nvm = &hw->nvm;
2825 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2826 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2830 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
2832 ret_val = e1000_update_nvm_checksum_generic(hw);
/* Only flash-backed (software-sequenced) NVM needs the bank commit below */
2836 if (nvm->type != e1000_nvm_flash_sw)
2839 nvm->ops.acquire(hw);
2842 * We're writing to the opposite bank so if we're on bank 1,
2843 * write to bank 0 etc. We also need to erase the segment that
2844 * is going to be written
2846 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2847 if (ret_val != E1000_SUCCESS) {
2848 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
2853 new_bank_offset = nvm->flash_bank_size;
2854 old_bank_offset = 0;
2855 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2859 old_bank_offset = nvm->flash_bank_size;
2860 new_bank_offset = 0;
2861 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2866 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2868 * Determine whether to write the value stored
2869 * in the other NVM bank or a modified value stored
2872 if (dev_spec->shadow_ram[i].modified) {
2873 data = dev_spec->shadow_ram[i].value;
/* Unmodified word: carry it over from the currently valid bank */
2875 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2883 * If the word is 0x13, then make sure the signature bits
2884 * (15:14) are 11b until the commit has completed.
2885 * This will allow us to write 10b which indicates the
2886 * signature is valid. We want to do this after the write
2887 * has completed so that we don't mark the segment valid
2888 * while the write is still in progress
2890 if (i == E1000_ICH_NVM_SIG_WORD)
2891 data |= E1000_ICH_NVM_SIG_MASK;
2893 /* Convert offset to bytes. */
2894 act_offset = (i + new_bank_offset) << 1;
2897 /* Write the bytes to the new bank. */
2898 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2905 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2913 * Don't bother writing the segment valid bits if sector
2914 * programming failed.
2917 DEBUGOUT("Flash commit failed.\n");
2922 * Finally validate the new segment by setting bit 15:14
2923 * to 10b in word 0x13 , this can be done without an
2924 * erase as well since these bits are 11 to start with
2925 * and we need to change bit 14 to 0b
2927 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2928 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2933 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2940 * And invalidate the previously valid segment by setting
2941 * its signature word (0x13) high_byte to 0b. This can be
2942 * done without an erase because flash erase sets all bits
2943 * to 1's. We can write 1's to 0's without an erase
2945 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2946 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2950 /* Great! Everything worked, we can now clear the cached entries. */
2951 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2952 dev_spec->shadow_ram[i].modified = false;
2953 dev_spec->shadow_ram[i].value = 0xFFFF;
2957 nvm->ops.release(hw);
2960 * Reload the EEPROM, or else modifications will not appear
2961 * until after the next adapter reset.
2964 nvm->ops.reload(hw);
2970 DEBUGOUT1("NVM update error: %d\n", ret_val);
2976 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2977 * @hw: pointer to the HW structure
2979 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2980 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
2981 * calculated, in which case we need to calculate the checksum and set bit 6.
2983 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2988 u16 valid_csum_mask;
2990 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2993 * Read NVM and check Invalid Image CSUM bit. If this bit is 0,
2994 * the checksum needs to be fixed. This bit is an indication that
2995 * the NVM was prepared by OEM software and did not calculate
2996 * the checksum...a likely scenario.
/* Which NVM word/bit holds the valid-checksum flag depends on the MAC */
2998 switch (hw->mac.type) {
3000 word = NVM_FUTURE_INIT_WORD1;
3001 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3005 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
/* Flag clear: set it, write it back, and commit so the generic
 * checksum validation below can pass. */
3009 if (!(data & valid_csum_mask)) {
3010 data |= valid_csum_mask;
3011 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3014 ret_val = hw->nvm.ops.update(hw);
3019 return e1000_validate_nvm_checksum_generic(hw);
3023 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3024 * @hw: pointer to the HW structure
3025 * @offset: The offset (in bytes) of the byte/word to read.
3026 * @size: Size of data to read, 1=byte 2=word
3027 * @data: The byte(s) to write to the NVM.
3029 * Writes one/two bytes to the NVM using the flash access registers.
3031 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3034 union ich8_hws_flash_status hsfsts;
3035 union ich8_hws_flash_ctrl hsflctl;
3036 u32 flash_linear_addr;
3041 DEBUGFUNC("e1000_write_ich8_data");
/* size must be 1 or 2 bytes and data must fit in that many bytes */
3043 if (size < 1 || size > 2 || data > size * 0xff ||
3044 offset > ICH_FLASH_LINEAR_ADDR_MASK)
3045 return -E1000_ERR_NVM;
3047 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3048 hw->nvm.flash_base_addr;
3053 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3054 if (ret_val != E1000_SUCCESS)
3057 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3058 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3059 hsflctl.hsf_ctrl.fldbcount = size - 1;
3060 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3061 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3063 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
/* Load the data register; mask to one byte for single-byte writes */
3066 flash_data = (u32)data & 0x00FF;
3068 flash_data = (u32)data;
3070 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3073 * check if FCERR is set to 1 , if set to 1, clear it
3074 * and try the whole sequence a few more times else done
3076 ret_val = e1000_flash_cycle_ich8lan(hw,
3077 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3078 if (ret_val == E1000_SUCCESS)
3082 * If we're here, then things are most likely
3083 * completely hosed, but if the error condition
3084 * is detected, it won't hurt to give it another
3085 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3087 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3088 if (hsfsts.hsf_status.flcerr)
3089 /* Repeat for some time before giving up. */
3091 if (!hsfsts.hsf_status.flcdone) {
3092 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3095 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3101 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3102 * @hw: pointer to the HW structure
3103 * @offset: The index of the byte to read.
3104 * @data: The byte to write to the NVM.
3106 * Writes a single byte to the NVM using the flash access registers.
3108 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* Widen the byte so it can go through the word-capable write helper */
3111 u16 word = (u16)data;
3113 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3115 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3119 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3120 * @hw: pointer to the HW structure
3121 * @offset: The offset of the byte to write.
3122 * @byte: The byte to write to the NVM.
3124 * Writes a single byte to the NVM using the flash access registers.
3125 * Goes through a retry algorithm before giving up.
3127 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3128 u32 offset, u8 byte)
3131 u16 program_retries;
3133 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
/* First attempt; on failure fall through to the retry loop below */
3135 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
/* Retry the byte program up to 100 times before reporting an NVM error */
3139 for (program_retries = 0; program_retries < 100; program_retries++) {
3140 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3142 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3143 if (ret_val == E1000_SUCCESS)
3146 if (program_retries == 100)
3147 return -E1000_ERR_NVM;
3149 return E1000_SUCCESS;
3153 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3154 * @hw: pointer to the HW structure
3155 * @bank: 0 for first bank, 1 for second bank, etc.
3157 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3158 * bank N is 4096 * N + flash_reg_addr.
3160 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3162 struct e1000_nvm_info *nvm = &hw->nvm;
3163 union ich8_hws_flash_status hsfsts;
3164 union ich8_hws_flash_ctrl hsflctl;
3165 u32 flash_linear_addr;
3166 /* bank size is in 16bit words - adjust to bytes */
3167 u32 flash_bank_size = nvm->flash_bank_size * 2;
3170 s32 j, iteration, sector_size;
3172 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3174 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3177 * Determine HW Sector size: Read BERASE bits of hw flash status
3179 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3180 * consecutive sectors. The start index for the nth Hw sector
3181 * can be calculated as = bank * 4096 + n * 256
3182 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3183 * The start index for the nth Hw sector can be calculated
3185 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3186 * (ich9 only, otherwise error condition)
3187 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3189 switch (hsfsts.hsf_status.berasesz) {
3191 /* Hw sector size 256 */
3192 sector_size = ICH_FLASH_SEG_SIZE_256;
3193 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3196 sector_size = ICH_FLASH_SEG_SIZE_4K;
3200 sector_size = ICH_FLASH_SEG_SIZE_8K;
3204 sector_size = ICH_FLASH_SEG_SIZE_64K;
/* Unknown/unsupported erase block size reported by the controller */
3208 return -E1000_ERR_NVM;
3211 /* Start with the base address, then add the sector offset. */
3212 flash_linear_addr = hw->nvm.flash_base_addr;
3213 flash_linear_addr += (bank) ? flash_bank_size : 0;
3215 for (j = 0; j < iteration ; j++) {
3218 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3223 * Write a value 11 (block Erase) in Flash
3224 * Cycle field in hw flash control
3226 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3228 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3229 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3233 * Write the last 24 bits of an index within the
3234 * block into Flash Linear address field in Flash
3237 flash_linear_addr += (j * sector_size);
3238 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3241 ret_val = e1000_flash_cycle_ich8lan(hw,
3242 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
3243 if (ret_val == E1000_SUCCESS)
3247 * Check if FCERR is set to 1. If 1,
3248 * clear it and try the whole sequence
3249 * a few more times else Done
3251 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3253 if (hsfsts.hsf_status.flcerr)
3254 /* repeat for some time before giving up */
3256 else if (!hsfsts.hsf_status.flcdone)
3258 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3261 return E1000_SUCCESS;
3265 * e1000_valid_led_default_ich8lan - Set the default LED settings
3266 * @hw: pointer to the HW structure
3267 * @data: Pointer to the LED settings
3269 * Reads the LED default settings from the NVM to data. If the NVM LED
3270 * settings is all 0's or F's, set the LED default to a valid LED default
3273 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3277 DEBUGFUNC("e1000_valid_led_default_ich8lan");
3279 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3281 DEBUGOUT("NVM Read Error\n");
/* All-0s or all-Fs means the NVM LED word is unprogrammed; use default */
3285 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3286 *data = ID_LED_DEFAULT_ICH8LAN;
3288 return E1000_SUCCESS;
3292 * e1000_id_led_init_pchlan - store LED configurations
3293 * @hw: pointer to the HW structure
3295 * PCH does not control LEDs via the LEDCTL register, rather it uses
3296 * the PHY LED configuration register.
3298 * PCH also does not have an "always on" or "always off" mode which
3299 * complicates the ID feature. Instead of using the "on" mode to indicate
3300 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3301 * use "link_up" mode. The LEDs will still ID on request if there is no
3302 * link based on logic in e1000_led_[on|off]_pchlan().
3304 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3306 struct e1000_mac_info *mac = &hw->mac;
/* "on" maps to link_up mode; "off" is the same mode with inverted output */
3308 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3309 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3310 u16 data, i, temp, shift;
3312 DEBUGFUNC("e1000_id_led_init_pchlan");
3314 /* Get default ID LED modes */
3315 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
3319 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3320 mac->ledctl_mode1 = mac->ledctl_default;
3321 mac->ledctl_mode2 = mac->ledctl_default;
/* Each LED has a 4-bit mode field in the NVM word (i << 2 selects it) */
3323 for (i = 0; i < 4; i++) {
3324 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
3327 case ID_LED_ON1_DEF2:
3328 case ID_LED_ON1_ON2:
3329 case ID_LED_ON1_OFF2:
3330 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3331 mac->ledctl_mode1 |= (ledctl_on << shift);
3333 case ID_LED_OFF1_DEF2:
3334 case ID_LED_OFF1_ON2:
3335 case ID_LED_OFF1_OFF2:
3336 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3337 mac->ledctl_mode1 |= (ledctl_off << shift);
/* mode2 keys off the second half of each ID_LED_* encoding */
3344 case ID_LED_DEF1_ON2:
3345 case ID_LED_ON1_ON2:
3346 case ID_LED_OFF1_ON2:
3347 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3348 mac->ledctl_mode2 |= (ledctl_on << shift);
3350 case ID_LED_DEF1_OFF2:
3351 case ID_LED_ON1_OFF2:
3352 case ID_LED_OFF1_OFF2:
3353 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3354 mac->ledctl_mode2 |= (ledctl_off << shift);
3362 return E1000_SUCCESS;
3366 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3367 * @hw: pointer to the HW structure
3369 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3370 * register, so the the bus width is hard coded.
3372 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3374 struct e1000_bus_info *bus = &hw->bus;
3377 DEBUGFUNC("e1000_get_bus_info_ich8lan");
3379 ret_val = e1000_get_bus_info_pcie_generic(hw);
3382 * ICH devices are "PCI Express"-ish. They have
3383 * a configuration space, but do not contain
3384 * PCI Express Capability registers, so bus width
3385 * must be hardcoded.
/* Generic probe couldn't determine width; force x1 for ICH parts */
3387 if (bus->width == e1000_bus_width_unknown)
3388 bus->width = e1000_bus_width_pcie_x1;
3394 * e1000_reset_hw_ich8lan - Reset the hardware
3395 * @hw: pointer to the HW structure
3397 * Does a full reset of the hardware which includes a reset of the PHY and
3400 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3402 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3407 DEBUGFUNC("e1000_reset_hw_ich8lan");
3410 * Prevent the PCI-E bus from sticking if there is no TLP connection
3411 * on the last TLP read/write transaction when MAC is reset.
3413 ret_val = e1000_disable_pcie_master_generic(hw);
3415 DEBUGOUT("PCI-E Master disable polling has failed.\n");
3417 DEBUGOUT("Masking off all interrupts\n");
3418 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3421 * Disable the Transmit and Receive units. Then delay to allow
3422 * any pending transactions to complete before we hit the MAC
3423 * with the global reset.
3425 E1000_WRITE_REG(hw, E1000_RCTL, 0);
3426 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3427 E1000_WRITE_FLUSH(hw);
3431 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3432 if (hw->mac.type == e1000_ich8lan) {
3433 /* Set Tx and Rx buffer allocation to 8k apiece. */
3434 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3435 /* Set Packet Buffer Size to 16k. */
3436 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3439 if (hw->mac.type == e1000_pchlan) {
3440 /* Save the NVM K1 bit setting*/
3441 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3445 if (kum_cfg & E1000_NVM_K1_ENABLE)
3446 dev_spec->nvm_k1_enabled = true;
3448 dev_spec->nvm_k1_enabled = false;
3451 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3453 if (!hw->phy.ops.check_reset_block(hw)) {
3455 * Full-chip reset requires MAC and PHY reset at the same
3456 * time to make sure the interface between MAC and the
3457 * external PHY is reset.
3459 ctrl |= E1000_CTRL_PHY_RST;
3462 * Gate automatic PHY configuration by hardware on
3465 if ((hw->mac.type == e1000_pch2lan) &&
3466 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3467 e1000_gate_hw_phy_config_ich8lan(hw, true);
3469 ret_val = e1000_acquire_swflag_ich8lan(hw);
3470 DEBUGOUT("Issuing a global reset to ich8lan\n");
3471 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3472 /* cannot issue a flush here because it hangs the hardware */
3475 /* Set Phy Config Counter to 50msec */
3476 if (hw->mac.type == e1000_pch2lan) {
3477 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
3478 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3479 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3480 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
3484 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
/* If the PHY was reset along with the MAC, finish PHY bring-up */
3486 if (ctrl & E1000_CTRL_PHY_RST) {
3487 ret_val = hw->phy.ops.get_cfg_done(hw);
3491 ret_val = e1000_post_phy_reset_ich8lan(hw);
3497 * For PCH, this write will make sure that any noise
3498 * will be detected as a CRC error and be dropped rather than show up
3499 * as a bad packet to the DMA engine.
3501 if (hw->mac.type == e1000_pchlan)
3502 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
/* Mask and clear any interrupts raised during the reset sequence */
3504 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3505 E1000_READ_REG(hw, E1000_ICR);
3507 reg = E1000_READ_REG(hw, E1000_KABGTXD);
3508 reg |= E1000_KABGTXD_BGSQLBIAS;
3509 E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
3511 return E1000_SUCCESS;
3515 * e1000_init_hw_ich8lan - Initialize the hardware
3516 * @hw: pointer to the HW structure
3518 * Prepares the hardware for transmit and receive by doing the following:
3519 * - initialize hardware bits
3520 * - initialize LED identification
3521 * - setup receive address registers
3522 * - setup flow control
3523 * - setup transmit descriptors
3524 * - clear statistics
3526 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3528 struct e1000_mac_info *mac = &hw->mac;
3529 u32 ctrl_ext, txdctl, snoop;
3533 DEBUGFUNC("e1000_init_hw_ich8lan");
3535 e1000_initialize_hw_bits_ich8lan(hw);
3537 /* Initialize identification LED */
3538 ret_val = mac->ops.id_led_init(hw);
3540 DEBUGOUT("Error initializing identification LED\n");
3541 /* This is not fatal and we should not stop init due to this */
3543 /* Setup the receive address. */
3544 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3546 /* Zero out the Multicast HASH table */
3547 DEBUGOUT("Zeroing the MTA\n");
3548 for (i = 0; i < mac->mta_reg_count; i++)
3549 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3552 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3553 * the ME. Disable wakeup by clearing the host wakeup bit.
3554 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3556 if (hw->phy.type == e1000_phy_82578) {
3557 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
3558 i &= ~BM_WUC_HOST_WU_BIT;
3559 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
3560 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3565 /* Setup link and flow control */
3566 ret_val = mac->ops.setup_link(hw);
3568 /* Set the transmit descriptor write-back policy for both queues */
3569 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3570 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3571 E1000_TXDCTL_FULL_TX_DESC_WB;
3572 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3573 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3574 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
/* Queue 1 gets the same write-back/prefetch policy as queue 0 */
3575 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3576 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3577 E1000_TXDCTL_FULL_TX_DESC_WB;
3578 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3579 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3580 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3583 * ICH8 has opposite polarity of no_snoop bits.
3584 * By default, we should use snoop behavior.
3586 if (mac->type == e1000_ich8lan)
3587 snoop = PCIE_ICH8_SNOOP_ALL;
3589 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3590 e1000_set_pcie_no_snoop_generic(hw, snoop);
3592 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3593 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3594 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3597 * Clear all of the statistics registers (clear on read). It is
3598 * important that we do this after we have tried to establish link
3599 * because the symbol error count will increment wildly if there
3602 e1000_clear_hw_cntrs_ich8lan(hw);
3608 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3609 * @hw: pointer to the HW structure
3611 * Sets/Clears required hardware bits necessary for correctly setting up the
3612 * hardware for transmit and receive.
3614 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3618 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3620 /* Extended Device Control */
3621 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3623 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3624 if (hw->mac.type >= e1000_pchlan)
3625 reg |= E1000_CTRL_EXT_PHYPDEN;
3626 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3628 /* Transmit Descriptor Control 0 */
3629 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3631 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3633 /* Transmit Descriptor Control 1 */
3634 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3636 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3638 /* Transmit Arbitration Control 0 */
3639 reg = E1000_READ_REG(hw, E1000_TARC(0));
/* NOTE(review): magic TARC bit numbers come from hardware errata/spec
 * updates; meanings are not derivable from this file - do not change. */
3640 if (hw->mac.type == e1000_ich8lan)
3641 reg |= (1 << 28) | (1 << 29);
3642 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3643 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3645 /* Transmit Arbitration Control 1 */
3646 reg = E1000_READ_REG(hw, E1000_TARC(1));
3647 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3651 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3652 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3655 if (hw->mac.type == e1000_ich8lan) {
3656 reg = E1000_READ_REG(hw, E1000_STATUS);
3658 E1000_WRITE_REG(hw, E1000_STATUS, reg);
3662 * work-around descriptor data corruption issue during nfs v2 udp
3663 * traffic, just disable the nfs filtering capability
3665 reg = E1000_READ_REG(hw, E1000_RFCTL);
3666 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3668 * Disable IPv6 extension header parsing because some malformed
3669 * IPv6 headers can hang the Rx.
3671 if (hw->mac.type == e1000_ich8lan)
3672 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
3673 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3679 * e1000_setup_link_ich8lan - Setup flow control and link settings
3680 * @hw: pointer to the HW structure
3682 * Determines which flow control settings to use, then configures flow
3683 * control. Calls the appropriate media-specific link configuration
3684 * function. Assuming the adapter has a valid link partner, a valid link
3685 * should be established. Assumes the hardware has previously been reset
3686 * and the transmitter and receiver are not enabled.
3688 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3692 DEBUGFUNC("e1000_setup_link_ich8lan");
/* If PHY resets are blocked (e.g. by a manageability engine), leave the
 * link configuration alone and report success. */
3694 if (hw->phy.ops.check_reset_block(hw))
3695 return E1000_SUCCESS;
3698 * ICH parts do not have a word in the NVM to determine
3699 * the default flow control setting, so we explicitly
3702 if (hw->fc.requested_mode == e1000_fc_default)
3703 hw->fc.requested_mode = e1000_fc_full;
3706 * Save off the requested flow control mode for use later. Depending
3707 * on the link partner's capabilities, we may or may not use this mode.
3709 hw->fc.current_mode = hw->fc.requested_mode;
3711 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3712 hw->fc.current_mode);
3714 /* Continue to configure the copper link. */
3715 ret_val = hw->mac.ops.setup_physical_interface(hw);
/* Program flow-control timers; PCH-class PHYs (82577/8/9, i217) also
 * need the refresh timer and a PHY-side pause-timer write (the value
 * written to BM_PORT_CTRL_PAGE reg 27 is not visible in this view). */
3719 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
3720 if ((hw->phy.type == e1000_phy_82578) ||
3721 (hw->phy.type == e1000_phy_82579) ||
3722 (hw->phy.type == e1000_phy_i217) ||
3723 (hw->phy.type == e1000_phy_82577)) {
3724 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3726 ret_val = hw->phy.ops.write_reg(hw,
3727 PHY_REG(BM_PORT_CTRL_PAGE, 27),
3733 return e1000_set_fc_watermarks_generic(hw);
3737 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3738 * @hw: pointer to the HW structure
3740 * Configures the kumeran interface to the PHY to wait the appropriate time
3741 * when polling the PHY, then call the generic setup_copper_link to finish
3742 * configuring the copper link.
3744 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3750 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
/* Force link-up and clear forced speed/duplex so autoneg results apply. */
3752 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3753 ctrl |= E1000_CTRL_SLU;
3754 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3755 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3758 * Set the mac to wait the maximum time between each iteration
3759 * and increase the max iterations when polling the phy;
3760 * this fixes erroneous timeouts at 10Mbps.
3762 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
/* Read-modify-write of the Kumeran inband parameter register (the
 * modified field is not visible in this view). */
3766 ret_val = e1000_read_kmrn_reg_generic(hw,
3767 E1000_KMRNCTRLSTA_INBAND_PARAM,
3772 ret_val = e1000_write_kmrn_reg_generic(hw,
3773 E1000_KMRNCTRLSTA_INBAND_PARAM,
/* Dispatch PHY-specific copper setup by PHY type. */
3778 switch (hw->phy.type) {
3779 case e1000_phy_igp_3:
3780 ret_val = e1000_copper_link_setup_igp(hw);
3785 case e1000_phy_82578:
3786 ret_val = e1000_copper_link_setup_m88(hw);
3790 case e1000_phy_82577:
3791 case e1000_phy_82579:
3792 case e1000_phy_i217:
3793 ret_val = e1000_copper_link_setup_82577(hw);
/* IFE PHY path: configure MDI/MDI-X from hw->phy.mdix (1 = MDI,
 * 2 = MDI-X, otherwise auto -- case labels elided from this view). */
3798 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3803 reg_data &= ~IFE_PMC_AUTO_MDIX;
3805 switch (hw->phy.mdix) {
3807 reg_data &= ~IFE_PMC_FORCE_MDIX;
3810 reg_data |= IFE_PMC_FORCE_MDIX;
3814 reg_data |= IFE_PMC_AUTO_MDIX;
3817 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
3826 return e1000_setup_copper_link_generic(hw);
3830 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3831 * @hw: pointer to the HW structure
3832 * @speed: pointer to store current link speed
3833 * @duplex: pointer to store the current link duplex
3835 * Calls the generic get_speed_and_duplex to retrieve the current link
3836 * information and then calls the Kumeran lock loss workaround for links at
3839 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3844 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
/* Delegate to the generic copper speed/duplex query ... */
3846 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
/* ... then, only for ICH8 with the IGP3 PHY at 1 Gb/s, run the Kumeran
 * PCS lock-loss workaround (see its header below). */
3850 if ((hw->mac.type == e1000_ich8lan) &&
3851 (hw->phy.type == e1000_phy_igp_3) &&
3852 (*speed == SPEED_1000)) {
3853 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3860 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3861 * @hw: pointer to the HW structure
3863 * Work-around for 82566 Kumeran PCS lock loss:
3864 * On link status change (i.e. PCI reset, speed change) and link is up and
3866 * 0) if workaround is optionally disabled do nothing
3867 * 1) wait 1ms for Kumeran link to come up
3868 * 2) check Kumeran Diagnostic register PCS lock loss bit
3869 * 3) if not set the link is locked (all is good), otherwise...
3871 * 5) repeat up to 10 times
3872 * Note: this is only called for IGP3 copper when speed is 1gb.
3874 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3876 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3882 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
/* Step 0 of the header's recipe: workaround may be administratively
 * disabled via e1000_set_kmrn_lock_loss_workaround_ich8lan(). */
3884 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3885 return E1000_SUCCESS;
3888 * Make sure link is up before proceeding. If not just return.
3889 * Attempting this while link is negotiating fouled up link
3892 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3894 return E1000_SUCCESS;
/* Up to 10 attempts: read the diagnostic register twice (first read
 * clears the latched bit), check PCS lock, and reset the PHY if lost. */
3896 for (i = 0; i < 10; i++) {
3897 /* read once to clear */
3898 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3901 /* and again to get new status */
3902 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3906 /* check for PCS lock */
3907 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3908 return E1000_SUCCESS;
3910 /* Issue PHY reset */
3911 hw->phy.ops.reset(hw);
/* All 10 attempts failed: permanently disable gigabit (in D0 and
 * non-D0a) so the link can come up at a lower speed. */
3914 /* Disable GigE link negotiation */
3915 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3916 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3917 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3918 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3921 * Call gig speed drop workaround on Gig disable before accessing
3924 e1000_gig_downshift_workaround_ich8lan(hw);
3926 /* unable to acquire PCS lock */
3927 return -E1000_ERR_PHY;
3931 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3932 * @hw: pointer to the HW structure
3933 * @state: boolean value used to set the current Kumeran workaround state
3935 * If ICH8, set the current Kumeran workaround state (enabled - true
3936 * /disabled - false).
3938 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3941 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3943 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
/* The Kumeran lock-loss workaround only applies to ICH8; refuse to
 * change the flag on any other MAC type. */
3945 if (hw->mac.type != e1000_ich8lan) {
3946 DEBUGOUT("Workaround applies to ICH8 only.\n")
3950 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3956 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3957 * @hw: pointer to the HW structure
3959 * Workaround for 82566 power-down on D3 entry:
3960 * 1) disable gigabit link
3961 * 2) write VR power-down enable
3963 * Continue if successful, else issue LCD reset and repeat
3965 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3971 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
/* Only the IGP3 PHY needs this D3-entry power-down sequence. */
3973 if (hw->phy.type != e1000_phy_igp_3)
3976 /* Try the workaround twice (if needed) */
/* Loop body (loop statement itself elided from this view): disable
 * gigabit, write VR power-down, verify; on failure reset the PHY via
 * CTRL.PHY_RST and retry exactly once ('retry' terminates pass two). */
3979 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3980 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3981 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3982 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3985 * Call gig speed drop workaround on Gig disable before
3986 * accessing any PHY registers
3988 if (hw->mac.type == e1000_ich8lan)
3989 e1000_gig_downshift_workaround_ich8lan(hw);
3991 /* Write VR power-down enable */
3992 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3993 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3994 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3995 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3997 /* Read it back and test */
3998 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3999 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4000 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4003 /* Issue PHY reset and repeat at most one more time */
4004 reg = E1000_READ_REG(hw, E1000_CTRL);
4005 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4011 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4012 * @hw: pointer to the HW structure
4014 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
4015 * LPLU, Gig disable, MDIC PHY reset):
4016 * 1) Set Kumeran Near-end loopback
4017 * 2) Clear Kumeran Near-end loopback
4018 * Should only be called for ICH8[m] devices with any 1G Phy.
4020 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4025 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
/* Per the header: ICH8 with a 1G PHY only; the IFE PHY (10/100) is
 * explicitly excluded. */
4027 if ((hw->mac.type != e1000_ich8lan) ||
4028 (hw->phy.type == e1000_phy_ife))
/* Pulse Kumeran near-end loopback: set the NELPBK bit, write it, then
 * clear it and write again (second write below). */
4031 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4035 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4036 ret_val = e1000_write_kmrn_reg_generic(hw,
4037 E1000_KMRNCTRLSTA_DIAG_OFFSET,
4041 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4042 ret_val = e1000_write_kmrn_reg_generic(hw,
4043 E1000_KMRNCTRLSTA_DIAG_OFFSET,
4048 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4049 * @hw: pointer to the HW structure
4051 * During S0 to Sx transition, it is possible the link remains at gig
4052 * instead of negotiating to a lower speed. Before going to Sx, set
4053 * 'Gig Disable' to force link speed negotiation to a lower speed based on
4054 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
4055 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4056 * needs to be written.
4057 * Parts that support (and are linked to a partner which support) EEE in
4058 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4059 * than 10Mbps w/o EEE.
4061 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4063 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4067 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
/* Build the PHY_CTRL value first; it is written back once at 4145
 * below, after all i217-specific adjustments. */
4069 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4070 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4071 if (hw->phy.type == e1000_phy_i217) {
4074 ret_val = hw->phy.ops.acquire(hw);
4078 if (!dev_spec->eee_disable) {
4082 e1000_read_emi_reg_locked(hw,
4083 I217_EEE_ADVERTISEMENT,
4089 * Disable LPLU if both link partners support 100BaseT
4090 * EEE and 100Full is advertised on both ends of the
4093 if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4094 (dev_spec->eee_lp_ability &
4095 I82579_EEE_100_SUPPORTED) &&
4096 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
4097 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4098 E1000_PHY_CTRL_NOND0A_LPLU);
4102 * For i217 Intel Rapid Start Technology support,
4103 * when the system is going into Sx and no manageability engine
4104 * is present, the driver must configure proxy to reset only on
4105 * power good. LPI (Low Power Idle) state must also reset only
4106 * on power good, as well as the MTA (Multicast table array).
4107 * The SMBus release must also be disabled on LCD reset.
/* FWSM.FW_VALID clear means no manageability firmware is present. */
4109 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4110 E1000_ICH_FWSM_FW_VALID)) {
4112 /* Enable proxy to reset only on power good. */
4113 hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4115 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4116 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4120 * Set bit enable LPI (EEE) to reset only on
4123 hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4124 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4125 hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4127 /* Disable the SMB release on LCD reset. */
4128 hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4129 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4130 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4134 * Enable MTA to reset for Intel Rapid Start Technology
4137 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4138 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4139 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4142 hw->phy.ops.release(hw);
/* Commit the accumulated PHY_CTRL settings (GbE disable, LPLU bits). */
4145 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4147 if (hw->mac.type == e1000_ich8lan)
4148 e1000_gig_downshift_workaround_ich8lan(hw);
/* PCH and newer also need the OEM bits written (and a PHY reset on
 * 82577/82578 to activate them), then the SMBus address programmed. */
4150 if (hw->mac.type >= e1000_pchlan) {
4151 e1000_oem_bits_config_ich8lan(hw, false);
4153 /* Reset PHY to activate OEM bits on 82577/8 */
4154 if (hw->mac.type == e1000_pchlan)
4155 e1000_phy_hw_reset_generic(hw);
4157 ret_val = hw->phy.ops.acquire(hw);
4160 e1000_write_smbus_addr(hw);
4161 hw->phy.ops.release(hw);
4168 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4169 * @hw: pointer to the HW structure
4171 * During Sx to S0 transitions on non-managed devices or managed devices
4172 * on which PHY resets are not blocked, if the PHY registers cannot be
4173 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
4175 * On i217, setup Intel Rapid Start Technology.
4177 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4181 DEBUGFUNC("e1000_resume_workarounds_pchlan");
/* PCH2 (and newer) only; earlier MACs skip the PHY re-init below. */
4183 if (hw->mac.type < e1000_pch2lan)
4186 ret_val = e1000_init_phy_workarounds_pchlan(hw);
4188 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4193 * For i217 Intel Rapid Start Technology support when the system
4194 * is transitioning from Sx and no manageability engine is present
4195 * configure SMBus to restore on reset, disable proxy, and enable
4196 * the reset on MTA (Multicast table array).
4198 if (hw->phy.type == e1000_phy_i217) {
4201 ret_val = hw->phy.ops.acquire(hw);
4203 DEBUGOUT("Failed to setup iRST\n");
/* This block undoes the i217 Sx settings applied by
 * e1000_suspend_workarounds_ich8lan() when no firmware is present. */
4207 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4208 E1000_ICH_FWSM_FW_VALID)) {
4210 * Restore clear on SMB if no manageability engine
4213 ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
4217 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4218 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
/* Disable the proxy entirely on resume. */
4221 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
4223 /* Enable reset on MTA */
4224 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
4228 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4229 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4232 DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
4233 hw->phy.ops.release(hw);
4238 * e1000_cleanup_led_ich8lan - Restore the default LED operation
4239 * @hw: pointer to the HW structure
4241 * Return the LED back to the default configuration.
4243 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4245 DEBUGFUNC("e1000_cleanup_led_ich8lan");
/* IFE PHYs drive LEDs via a PHY register; others use the MAC LEDCTL. */
4247 if (hw->phy.type == e1000_phy_ife)
4248 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4251 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
4252 return E1000_SUCCESS;
4256 * e1000_led_on_ich8lan - Turn LEDs on
4257 * @hw: pointer to the HW structure
4261 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
4263 DEBUGFUNC("e1000_led_on_ich8lan");
/* IFE PHY: force LEDs on through the special-control register in probe
 * mode; otherwise write the MAC's precomputed "on" LEDCTL pattern. */
4265 if (hw->phy.type == e1000_phy_ife)
4266 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4267 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
4269 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
4270 return E1000_SUCCESS;
4274 * e1000_led_off_ich8lan - Turn LEDs off
4275 * @hw: pointer to the HW structure
4277 * Turn off the LEDs.
4279 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
4281 DEBUGFUNC("e1000_led_off_ich8lan");
/* Mirror of e1000_led_on_ich8lan: IFE PHY LEDs off via probe mode,
 * otherwise the MAC's "off" LEDCTL pattern (ledctl_mode1). */
4283 if (hw->phy.type == e1000_phy_ife)
4284 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4285 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
4287 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
4288 return E1000_SUCCESS;
4292 * e1000_setup_led_pchlan - Configures SW controllable LED
4293 * @hw: pointer to the HW structure
4295 * This prepares the SW controllable LED for use.
4297 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4299 DEBUGFUNC("e1000_setup_led_pchlan");
/* PCH LEDs are controlled via the PHY's HV_LED_CONFIG register, not
 * the MAC LEDCTL; program it with the saved mode1 pattern. */
4301 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4302 (u16)hw->mac.ledctl_mode1);
4306 * e1000_cleanup_led_pchlan - Restore the default LED operation
4307 * @hw: pointer to the HW structure
4309 * Return the LED back to the default configuration.
4311 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4313 DEBUGFUNC("e1000_cleanup_led_pchlan");
/* Restore the default LED behavior by writing the saved default
 * pattern back to the PHY's HV_LED_CONFIG register. */
4315 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4316 (u16)hw->mac.ledctl_default);
4320 * e1000_led_on_pchlan - Turn LEDs on
4321 * @hw: pointer to the HW structure
4325 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4327 u16 data = (u16)hw->mac.ledctl_mode2;
4330 DEBUGFUNC("e1000_led_on_pchlan");
4333 * If no link, then turn LED on by setting the invert bit
4334 * for each LED that's mode is "link_up" in ledctl_mode2.
4336 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Each of the three LEDs occupies a 5-bit field in the config word. */
4337 for (i = 0; i < 3; i++) {
4338 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4339 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4340 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a link-up-mode LED lights while down. */
4342 if (led & E1000_PHY_LED0_IVRT)
4343 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4345 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4349 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4353 * e1000_led_off_pchlan - Turn LEDs off
4354 * @hw: pointer to the HW structure
4356 * Turn off the LEDs.
4358 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4360 u16 data = (u16)hw->mac.ledctl_mode1;
4363 DEBUGFUNC("e1000_led_off_pchlan");
4366 * If no link, then turn LED off by clearing the invert bit
4367 * for each LED that's mode is "link_up" in ledctl_mode1.
4369 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Same 5-bit-per-LED layout as e1000_led_on_pchlan, opposite goal:
 * ensure link-up-mode LEDs stay dark while the link is down. */
4370 for (i = 0; i < 3; i++) {
4371 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4372 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4373 E1000_LEDCTL_MODE_LINK_UP)
4375 if (led & E1000_PHY_LED0_IVRT)
4376 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4378 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4382 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4386 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4387 * @hw: pointer to the HW structure
4389 * Read appropriate register for the config done bit for completion status
4390 * and configure the PHY through s/w for EEPROM-less parts.
4392 * NOTE: some silicon which is EEPROM-less will fail trying to read the
4393 * config done bit, so only an error is logged and continues. If we were
4394 * to return with error, EEPROM-less silicon would not be able to be reset
4397 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4399 s32 ret_val = E1000_SUCCESS;
4403 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4405 e1000_get_cfg_done_generic(hw);
4407 /* Wait for indication from h/w that it has completed basic config */
4408 if (hw->mac.type >= e1000_ich10lan) {
4409 e1000_lan_init_done_ich8lan(hw);
4411 ret_val = e1000_get_auto_rd_done_generic(hw);
4414 * When auto config read does not complete, do not
4415 * return with an error. This can happen in situations
4416 * where there is no eeprom and prevents getting link.
4418 DEBUGOUT("Auto Read Done did not complete\n");
4419 ret_val = E1000_SUCCESS;
/* STATUS.PHYRA is write-to-clear here: writing the value with the bit
 * masked off clears the "PHY Reset Asserted" indication. */
4423 /* Clear PHY Reset Asserted bit */
4424 status = E1000_READ_REG(hw, E1000_STATUS);
4425 if (status & E1000_STATUS_PHYRA)
4426 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
4428 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4430 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
4431 if (hw->mac.type <= e1000_ich9lan) {
4432 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
4433 (hw->phy.type == e1000_phy_igp_3)) {
4434 e1000_phy_init_script_igp3(hw);
/* Newer parts: a failed NVM bank detect implies no usable EEPROM;
 * report -E1000_ERR_CONFIG (per the header, the caller logs and
 * continues rather than failing the reset). */
4437 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4438 /* Maybe we should do a basic PHY config */
4439 DEBUGOUT("EEPROM not present\n");
4440 ret_val = -E1000_ERR_CONFIG;
4448 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4449 * @hw: pointer to the HW structure
4451 * In the case of a PHY power down to save power, or to turn off link during a
4452 * driver unload, or wake on lan is not enabled, remove the link.
4454 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4456 /* If the management interface is not enabled, then power down */
/* Power down only when BOTH manageability mode is off and PHY resets
 * are not blocked -- either condition means firmware still needs the
 * PHY, so the link must be left up. */
4457 if (!(hw->mac.ops.check_mng_mode(hw) ||
4458 hw->phy.ops.check_reset_block(hw)))
4459 e1000_power_down_phy_copper(hw);
4465 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4466 * @hw: pointer to the HW structure
4468 * Clears hardware counters specific to the silicon family and calls
4469 * clear_hw_cntrs_generic to clear all general purpose counters.
4471 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4476 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4478 e1000_clear_hw_cntrs_base_generic(hw);
/* These statistics registers are read-to-clear; the return values are
 * intentionally discarded. */
4480 E1000_READ_REG(hw, E1000_ALGNERRC);
4481 E1000_READ_REG(hw, E1000_RXERRC);
4482 E1000_READ_REG(hw, E1000_TNCRS);
4483 E1000_READ_REG(hw, E1000_CEXTERR);
4484 E1000_READ_REG(hw, E1000_TSCTC);
4485 E1000_READ_REG(hw, E1000_TSCTFC);
4487 E1000_READ_REG(hw, E1000_MGTPRC);
4488 E1000_READ_REG(hw, E1000_MGTPDC);
4489 E1000_READ_REG(hw, E1000_MGTPTC);
4491 E1000_READ_REG(hw, E1000_IAC);
4492 E1000_READ_REG(hw, E1000_ICRXOC);
4494 /* Clear PHY statistics registers */
/* PCH-class PHYs keep additional collision/deferral statistics on the
 * HV_STATS_PAGE; select that page under the acquired PHY semaphore and
 * read each upper/lower register pair to clear it. */
4495 if ((hw->phy.type == e1000_phy_82578) ||
4496 (hw->phy.type == e1000_phy_82579) ||
4497 (hw->phy.type == e1000_phy_i217) ||
4498 (hw->phy.type == e1000_phy_82577)) {
4499 ret_val = hw->phy.ops.acquire(hw);
4502 ret_val = hw->phy.ops.set_page(hw,
4503 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4506 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4507 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4508 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4509 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4510 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4511 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4512 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4513 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4514 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4515 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4516 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4517 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4518 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4519 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4521 hw->phy.ops.release(hw);