1 /*******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 /* 82562G 10/100 Network Connection
35 * 82562G-2 10/100 Network Connection
36 * 82562GT 10/100 Network Connection
37 * 82562GT-2 10/100 Network Connection
38 * 82562V 10/100 Network Connection
39 * 82562V-2 10/100 Network Connection
40 * 82566DC-2 Gigabit Network Connection
41 * 82566DC Gigabit Network Connection
42 * 82566DM-2 Gigabit Network Connection
43 * 82566DM Gigabit Network Connection
44 * 82566MC Gigabit Network Connection
45 * 82566MM Gigabit Network Connection
46 * 82567LM Gigabit Network Connection
47 * 82567LF Gigabit Network Connection
48 * 82567V Gigabit Network Connection
49 * 82567LM-2 Gigabit Network Connection
50 * 82567LF-2 Gigabit Network Connection
51 * 82567V-2 Gigabit Network Connection
52 * 82567LF-3 Gigabit Network Connection
53 * 82567LM-3 Gigabit Network Connection
54 * 82567LM-4 Gigabit Network Connection
55 * 82577LM Gigabit Network Connection
56 * 82577LC Gigabit Network Connection
57 * 82578DM Gigabit Network Connection
58 * 82578DC Gigabit Network Connection
59 * 82579LM Gigabit Network Connection
60 * 82579V Gigabit Network Connection
63 #include "e1000_api.h"
65 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
66 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
67 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
68 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
69 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
70 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
71 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
72 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
73 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
74 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
75 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
77 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
79 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
80 u16 words, u16 *data);
81 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
82 u16 words, u16 *data);
83 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
84 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
85 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
87 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
88 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
89 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
90 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
91 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
92 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
93 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
94 u16 *speed, u16 *duplex);
95 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
96 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
97 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
98 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
99 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
100 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
101 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw);
102 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw);
103 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
104 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
105 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
106 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
107 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
108 u32 offset, u8 *data);
109 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
111 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
112 u32 offset, u16 *data);
113 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
114 u32 offset, u8 byte);
115 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
116 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
117 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
118 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
119 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
120 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	u16 flcdone:1; /* bit 0 Flash Cycle Done */
	u16 flcerr:1; /* bit 1 Flash Cycle Error */
	u16 dael:1; /* bit 2 Direct Access error Log */
	u16 berasesz:2; /* bit 4:3 Sector Erase Size */
	u16 flcinprog:1; /* bit 5 flash cycle in Progress */
	u16 reserved1:2; /* bit 7:6 Reserved */
	u16 reserved2:6; /* bit 13:8 Reserved */
	u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
	u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;   /* 0 Flash Cycle Go */
		u16 flcycle:2;   /* 2:1 Flash Cycle */
		u16 reserved:5;   /* 7:3 Reserved */
		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
		/* NOTE: despite the name, bits 15:10 of HSFCTL are reserved;
		 * the FLOCKDN bit lives in HSFSTS (bit 15), not here.
		 */
		u16 flockdn:6;   /* 15:10 Reserved */
/* ICH Flash Region Access Permissions (bit ranges shown high:low) */
union ich8_hws_flash_regacc {
	u32 grra:8; /* 7:0 GbE region Read Access */
	u32 grwa:8; /* 15:8 GbE region Write Access */
	u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
	u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
	/* A readback of 0xFFFF means the PHY did not respond on the
	 * MDIO bus; retry once before concluding it is inaccessible.
	 */
	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
		/* PHY_ID1 supplies the upper 16 bits of the id */
		phy_id = (u32)(phy_reg << 16);
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
		/* PHY_ID2 supplies the model bits; revision is masked off */
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
	/* If the id was already known (e.g. resume), it must match */
	if (hw->phy.id == phy_id)
	hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 * The semaphore must be dropped around the helpers, which
	 * acquire it themselves, and re-taken before returning.
	 */
	hw->phy.ops.release(hw);
	ret_val = e1000_set_mdio_slow_mode_hv(hw);
	ret_val = e1000_get_phy_id(hw);
	hw->phy.ops.acquire(hw);
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	ret_val = hw->phy.ops.acquire(hw);
		DEBUGOUT("Failed to initialize PHY flow\n");

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
		/* Gate automatic PHY configuration by hardware on
		 * non-managed 82579 (firmware not valid)
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(fwsm & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, true);

		/* If the PHY already answers, no toggle is needed */
		if (e1000_phy_is_accessible_pchlan(hw)) {
		/* On 82577/82578 with valid firmware, ME handles the
		 * interconnect - skip the manual toggle.
		 */
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");

		DEBUGOUT("Toggling LANPHYPC\n");

		/* Set Phy Config Counter to 50msec */
		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
		mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
		mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

		/* Toggle LANPHYPC Value bit */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL);
		mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
		mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
		E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
		E1000_WRITE_FLUSH(hw);
		/* Clear the override so the interconnect latches the value */
		mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
		E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
		E1000_WRITE_FLUSH(hw);

	hw->phy.ops.release(hw);

	/* Reset the PHY before any access to it.  Doing so, ensures
	 * that the PHY is in a known good state before we read/write
	 * PHY registers.  The generic reset is sufficient here,
	 * because we haven't determined the PHY type yet.
	 */
	ret_val = e1000_phy_hw_reset_generic(hw);

	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		e1000_gate_hw_phy_config_ich8lan(hw, false);
/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers
 *  for the PCH (82577/82578/82579/I217) family.
 **/
STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
	struct e1000_phy_info *phy = &hw->phy;

	DEBUGFUNC("e1000_init_phy_params_pchlan");

	/* 100 usec delay between PHY reset and first register access */
	phy->reset_delay_us = 100;

	/* HV (PCH-family) register accessors throughout */
	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

	/* Run the load-time workarounds before identifying the PHY */
	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);

	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
			ret_val = e1000_get_phy_id(hw);
			/* 0 and PHY_REVISION_MASK are treated as invalid ids */
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			ret_val = e1000_get_phy_id(hw);

	phy->type = e1000_get_phy_type_from_id(phy->id);

	/* Per-PHY-type ops, selected from the id read above */
	case e1000_phy_82577:
	case e1000_phy_82579:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
			e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000_phy_sw_reset_generic;
	case e1000_phy_82578:
		/* 82578 is M88-register compatible */
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000_get_cable_length_m88;
		phy->ops.get_info = e1000_get_phy_info_m88;
		/* unrecognized PHY type is an error */
		ret_val = -E1000_ERR_PHY;
/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers
 *  for the ICH8/ICH9/ICH10 family (IGP3, IFE and BM PHYs).
 **/
STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
	struct e1000_phy_info *phy = &hw->phy;

	DEBUGFUNC("e1000_init_phy_params_ich8lan");

	/* 100 usec delay between PHY reset and first register access */
	phy->reset_delay_us = 100;

	/* Default to IGP accessors; may be switched to BM below */
	phy->ops.acquire = e1000_acquire_swflag_ich8lan;
	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
	phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
	phy->ops.read_reg = e1000_read_phy_reg_igp;
	phy->ops.release = e1000_release_swflag_ich8lan;
	phy->ops.reset = e1000_phy_hw_reset_ich8lan;
	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
	phy->ops.write_reg = e1000_write_phy_reg_igp;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000_determine_phy_address(hw);
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		ret_val = e1000_determine_phy_address(hw);
			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");

	/* Retry reading the id until a known PHY type is found or the
	 * attempts are exhausted.
	 */
	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
		ret_val = e1000_get_phy_id(hw);

	/* Per-PHY-id parameters and ops */
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
	case IFE_PLUS_E_PHY_ID:
		/* IFE is a 10/100-only PHY - no gigabit advertisement */
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000_read_phy_reg_bm;
		phy->ops.write_reg = e1000_write_phy_reg_bm;
		phy->ops.commit = e1000_phy_sw_reset_generic;
		phy->ops.get_info = e1000_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
		/* unrecognized PHY id */
		return -E1000_ERR_PHY;

	return E1000_SUCCESS;
/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function pointers.
 *  The NVM lives in the chipset SPI flash and is accessed through the
 *  flash register window; a RAM shadow of the words is kept in dev_spec.
 **/
STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;

	DEBUGFUNC("e1000_init_nvm_params_ich8lan");

	/* Can't read flash registers if the register set isn't mapped. */
	if (!hw->flash_address) {
		DEBUGOUT("ERROR: Flash registers not mapped\n");
		return -E1000_ERR_CONFIG;

	nvm->type = e1000_nvm_flash_sw;

	/* GFPREG holds the GbE region base/limit in sector units */
	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

	/* sector_X_addr is a "sector"-aligned address (4096 bytes)
	 * Add 1 to sector_end_addr since this sector is included in
	 * the overall size.
	 */
	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

	/* flash_base_addr is byte-aligned */
	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

	/* find total size of the NVM, then cut in half since the total
	 * size represents two separate NVM banks.
	 */
	nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
				<< FLASH_SECTOR_ADDR_SHIFT);
	nvm->flash_bank_size /= 2;
	/* Adjust to word count */
	nvm->flash_bank_size /= sizeof(u16);

	nvm->word_size = E1000_SHADOW_RAM_WORDS;

	/* Clear shadow ram - 0xFFFF mirrors erased flash */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;

	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

	/* Function Pointers */
	nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
	nvm->ops.release = e1000_release_nvm_ich8lan;
	nvm->ops.read = e1000_read_nvm_ich8lan;
	nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
	nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
	nvm->ops.write = e1000_write_nvm_ich8lan;

	return E1000_SUCCESS;
/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function pointers.
 **/
STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_init_mac_params_ich8lan");

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	/* ICH8 has one fewer usable receive address register */
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* FWSM register */
	mac->has_fwsm = true;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = false;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
	/* function id */
	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_ich8lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_ich8lan;
	/* physical interface setup */
	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
	/* check for link */
	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

	/* LED and other operations - ICH family defaults first */
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		/* PCH2: SHRA-backed rar_set, fewer RAR entries */
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);

	/* Gate automatic PHY configuration by hardware on managed 82579 */
	if ((mac->type == e1000_pch2lan) &&
	    (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
		e1000_gate_hw_phy_config_ich8lan(hw, true);

	return E1000_SUCCESS;
666 * __e1000_access_emi_reg_locked - Read/write EMI register
667 * @hw: pointer to the HW structure
668 * @addr: EMI address to program
669 * @data: pointer to value to read/write from/to the EMI address
670 * @read: boolean flag to indicate read or write
672 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
674 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
675 u16 *data, bool read)
679 DEBUGFUNC("__e1000_access_emi_reg_locked");
681 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
686 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
689 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
696 * e1000_read_emi_reg_locked - Read Extended Management Interface register
697 * @hw: pointer to the HW structure
698 * @addr: EMI address to program
699 * @data: value to be read from the EMI address
701 * Assumes the SW/FW/HW Semaphore is already acquired.
703 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
705 DEBUGFUNC("e1000_read_emi_reg_locked");
707 return __e1000_access_emi_reg_locked(hw, addr, data, true);
711 * e1000_write_emi_reg_locked - Write Extended Management Interface register
712 * @hw: pointer to the HW structure
713 * @addr: EMI address to program
714 * @data: value to be written to the EMI address
716 * Assumes the SW/FW/HW Semaphore is already acquired.
718 STATIC s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
720 DEBUGFUNC("e1000_read_emi_reg_locked");
722 return __e1000_access_emi_reg_locked(hw, addr, &data, false);
/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
 **/
STATIC s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;

	DEBUGFUNC("e1000_set_eee_pchlan");

	/* Only the 82579 and I217 PHYs implement EEE here */
	if ((hw->phy.type != e1000_phy_82579) &&
	    (hw->phy.type != e1000_phy_i217))
		return E1000_SUCCESS;

	ret_val = hw->phy.ops.acquire(hw);

	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		u16 lpa, pcs_status, data;

		/* Save off link partner's EEE ability */
		switch (hw->phy.type) {
		case e1000_phy_82579:
			lpa = I82579_EEE_LP_ABILITY;
			pcs_status = I82579_EEE_PCS_STATUS;
			/* I217 uses its own EMI register offsets */
			lpa = I217_EEE_LP_ABILITY;
			pcs_status = I217_EEE_PCS_STATUS;
			ret_val = -E1000_ERR_PHY;

		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable.
		 */
		if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
			if (data & NWAY_LPAR_100TX_FD_CAPS)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			/* EEE is not supported in 100Half, so ignore
			 * partner's EEE in 100 ability if full-duplex
			 * is not advertised.
			 */
				dev_spec->eee_lp_ability &=
					~I82579_EEE_100_SUPPORTED;

		/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
		ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);

	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);

	hw->phy.ops.release(hw);
/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see of the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return E1000_SUCCESS;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);

	/* 82577/82578: K1 must be configured per gig/non-gig link */
	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);

	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

		return E1000_SUCCESS; /* No link detected */

	mac->get_link_status = false;

	/* MAC-type specific link-up workarounds */
	switch (hw->mac.type) {
		ret_val = e1000_k1_workaround_lv(hw);
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);

	/* Workaround for PCHx parts in half-duplex:
	 * Set the number of preambles removed from the packet
	 * when it is passed from the PHY to the MAC to prevent
	 * the MAC from misinterpreting the packet type.
	 */
	hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
	phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

	/* Only set the extra preamble bit when NOT in full duplex */
	if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
		phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

	hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000_check_downshift_generic(hw);

	/* Enable/Disable EEE after link up */
	ret_val = e1000_set_eee_pchlan(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000_config_fc_after_link_up_generic(hw);
		DEBUGOUT("Error configuring flow control\n");
/**
 *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific function pointers for PHY, MAC, and NVM.
 **/
void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
	DEBUGFUNC("e1000_init_function_pointers_ich8lan");

	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
	/* PHY init differs between the ICH8/9/10 and PCH families */
	switch (hw->mac.type) {
		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
954 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
955 * @hw: pointer to the HW structure
957 * Acquires the mutex for performing NVM operations.
959 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
961 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
963 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
965 return E1000_SUCCESS;
969 * e1000_release_nvm_ich8lan - Release NVM mutex
970 * @hw: pointer to the HW structure
972 * Releases the mutex used while performing NVM operations.
974 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
976 DEBUGFUNC("e1000_release_nvm_ich8lan");
978 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
/**
 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
 *  @hw: pointer to the HW structure
 *
 *  Acquires the software control flag for performing PHY and select
 *  MAC CSR accesses.
 **/
STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_acquire_swflag_ich8lan");

	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);

	/* Poll until any previously held SW flag is released */
	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
	if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))

	DEBUGOUT("SW has already locked the resource.\n");
	ret_val = -E1000_ERR_CONFIG;

	timeout = SW_FLAG_TIMEOUT;

	/* Request the flag, then poll for hardware to confirm ownership */
	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);

	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)

	/* Timed out: FW or HW holds the semaphore; back out our request */
	DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
		  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
	ret_val = -E1000_ERR_CONFIG;

	/* NOTE(review): this unlock is the failure path - on success the
	 * mutex stays held until e1000_release_swflag_ich8lan() - confirm
	 * against the surrounding (elided) control flow.
	 */
	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
/**
 *  e1000_release_swflag_ich8lan - Release software control flag
 *  @hw: pointer to the HW structure
 *
 *  Releases the software control flag for performing PHY and select
 *  MAC CSR accesses.
 **/
STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
	DEBUGFUNC("e1000_release_swflag_ich8lan");

	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);

	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
		/* Flag already clear - someone else dropped our lock */
		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");

	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1072 * e1000_check_mng_mode_ich8lan - Checks management mode
1073 * @hw: pointer to the HW structure
1075 * This checks if the adapter has any manageability enabled.
1076 * This is a function pointer entry point only called by read/write
1077 * routines for the PHY and NVM parts.
1079 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1083 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1085 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1087 return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
1088 ((fwsm & E1000_FWSM_MODE_MASK) ==
1089 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
1093 * e1000_check_mng_mode_pchlan - Checks management mode
1094 * @hw: pointer to the HW structure
1096 * This checks if the adapter has iAMT enabled.
1097 * This is a function pointer entry point only called by read/write
1098 * routines for the PHY and NVM parts.
1100 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1104 DEBUGFUNC("e1000_check_mng_mode_pchlan");
1106 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1108 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1109 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
/**
 *  e1000_rar_set_pch2lan - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.  For 82579, RAR[0] is the base address register that is to
 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 *  Use SHRA[0-3] in place of those reserved for ME.
 **/
STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
	u32 rar_low, rar_high;

	DEBUGFUNC("e1000_rar_set_pch2lan");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* RAR[0] (the MAC address) is written to the ordinary RAR pair */
	E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
	E1000_WRITE_FLUSH(hw);
	E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
	E1000_WRITE_FLUSH(hw);

	/* Other indexes map to SHRA[index - 1]; the SW flag is required
	 * because ME may also own these registers.
	 */
	if (index < hw->mac.rar_entry_count) {
		ret_val = e1000_acquire_swflag_ich8lan(hw);

		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
		E1000_WRITE_FLUSH(hw);

		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates */
		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))

		/* Readback mismatch - ME likely locked the SHRA pair */
		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
			  (index - 1), E1000_READ_REG(hw, E1000_FWSM));

	DEBUGOUT1("Failed to write receive address at index %d\n", index);
1178 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1179 * @hw: pointer to the HW structure
1181 * Checks if firmware is blocking the reset of the PHY.
1182 * This is a function pointer entry point only called by
1185 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1189 DEBUGFUNC("e1000_check_reset_block_ich8lan");
1191 fwsm = E1000_READ_REG(hw, E1000_FWSM);
/* RSPCIPHY set means firmware permits the PHY reset. */
1193 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
1194 : E1000_BLK_PHY_RESET;
1198 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1199 * @hw: pointer to the HW structure
1201 * Assumes semaphore already acquired.
1204 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
/* The SMBus address and frequency are latched from hardware straps. */
1207 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
1208 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1209 E1000_STRAP_SMT_FREQ_SHIFT;
1212 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1214 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1218 phy_data &= ~HV_SMB_ADDR_MASK;
1219 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1220 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1222 if (hw->phy.type == e1000_phy_i217) {
1223 /* Restore SMBus frequency */
/* Split the 2-bit strap frequency into the PHY's low/high bits. */
1225 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1226 phy_data |= (freq & (1 << 0)) <<
1227 HV_SMB_ADDR_FREQ_LOW_SHIFT;
1228 phy_data |= (freq & (1 << 1)) <<
1229 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1231 DEBUGOUT("Unsupported SMB frequency in PHY\n");
1235 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1239 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1240 * @hw: pointer to the HW structure
1242 * SW should configure the LCD from the NVM extended configuration region
1243 * as a workaround for certain parts.
1245 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1247 struct e1000_phy_info *phy = &hw->phy;
1248 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1249 s32 ret_val = E1000_SUCCESS;
1250 u16 word_addr, reg_data, reg_addr, phy_page = 0;
1252 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
1254 /* Initialize the PHY from the NVM on ICH platforms. This
1255 * is needed due to an issue where the NVM configuration is
1256 * not properly autoloaded after power transitions.
1257 * Therefore, after each PHY reset, we will load the
1258 * configuration data out of the NVM manually.
1260 switch (hw->mac.type) {
1262 if (phy->type != e1000_phy_igp_3)
/* Device-specific FEXTNVM mask selects which SW-config bit to test. */
1265 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
1266 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
1267 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1273 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1279 ret_val = hw->phy.ops.acquire(hw);
/* Nothing to do unless NVM says software owns the LCD configuration. */
1283 data = E1000_READ_REG(hw, E1000_FEXTNVM);
1284 if (!(data & sw_cfg_mask))
1287 /* Make sure HW does not configure LCD from PHY
1288 * extended configuration before SW configuration
1290 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1291 if ((hw->mac.type < e1000_pch2lan) &&
1292 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1295 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
1296 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1297 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1301 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1302 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1304 if (((hw->mac.type == e1000_pchlan) &&
1305 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1306 (hw->mac.type > e1000_pchlan)) {
1307 /* HW configures the SMBus address and LEDs when the
1308 * OEM and LCD Write Enable bits are set in the NVM.
1309 * When both NVM bits are cleared, SW will configure
1312 ret_val = e1000_write_smbus_addr(hw);
/* Mirror the MAC LEDCTL value into the PHY LED config register. */
1316 data = E1000_READ_REG(hw, E1000_LEDCTL);
1317 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1323 /* Configure LCD from extended configuration region. */
1325 /* cnf_base_addr is in DWORD */
1326 word_addr = (u16)(cnf_base_addr << 1);
/* Each entry is a (data, address) word pair in NVM. */
1328 for (i = 0; i < cnf_size; i++) {
1329 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
1334 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1339 /* Save off the PHY page for future writes. */
1340 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1341 phy_page = reg_data;
1345 reg_addr &= PHY_REG_MASK;
1346 reg_addr |= phy_page;
1348 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1355 hw->phy.ops.release(hw);
1360 * e1000_k1_gig_workaround_hv - K1 Si workaround
1361 * @hw: pointer to the HW structure
1362 * @link: link up bool flag
1364 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1365 * from a lower speed. This workaround disables K1 whenever link is at 1Gig
1366 * If link is down, the function will restore the default K1 setting located
1369 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1371 s32 ret_val = E1000_SUCCESS;
1373 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1375 DEBUGFUNC("e1000_k1_gig_workaround_hv");
/* Workaround applies only to the first-generation PCH MAC. */
1377 if (hw->mac.type != e1000_pchlan)
1378 return E1000_SUCCESS;
1380 /* Wrap the whole flow with the sw flag */
1381 ret_val = hw->phy.ops.acquire(hw);
1385 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
/* 82578 reports speed via BM_CS_STATUS ... */
1387 if (hw->phy.type == e1000_phy_82578) {
1388 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1393 status_reg &= (BM_CS_STATUS_LINK_UP |
1394 BM_CS_STATUS_RESOLVED |
1395 BM_CS_STATUS_SPEED_MASK);
1397 if (status_reg == (BM_CS_STATUS_LINK_UP |
1398 BM_CS_STATUS_RESOLVED |
1399 BM_CS_STATUS_SPEED_1000))
/* ... while 82577 reports it via HV_M_STATUS. */
1403 if (hw->phy.type == e1000_phy_82577) {
1404 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1409 status_reg &= (HV_M_STATUS_LINK_UP |
1410 HV_M_STATUS_AUTONEG_COMPLETE |
1411 HV_M_STATUS_SPEED_MASK);
1413 if (status_reg == (HV_M_STATUS_LINK_UP |
1414 HV_M_STATUS_AUTONEG_COMPLETE |
1415 HV_M_STATUS_SPEED_1000))
1419 /* Link stall fix for link up */
1420 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1426 /* Link stall fix for link down */
1427 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1433 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1436 hw->phy.ops.release(hw);
1442 * e1000_configure_k1_ich8lan - Configure K1 power state
1443 * @hw: pointer to the HW structure
1444 * @enable: K1 state to configure
1446 * Configure the K1 power state based on the provided parameter.
1447 * Assumes semaphore already acquired.
1449 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1451 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1459 DEBUGFUNC("e1000_configure_k1_ich8lan");
1461 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
/* Set or clear the K1 enable bit per the caller's request. */
1467 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1469 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1471 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
/* Briefly force speed/bypass to latch the K1 change, then restore the
 * original CTRL/CTRL_EXT values. */
1477 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1478 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1480 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1481 reg |= E1000_CTRL_FRCSPD;
1482 E1000_WRITE_REG(hw, E1000_CTRL, reg);
1484 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1485 E1000_WRITE_FLUSH(hw);
1487 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1488 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1489 E1000_WRITE_FLUSH(hw);
1492 return E1000_SUCCESS;
1496 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
1497 * @hw: pointer to the HW structure
1498 * @d0_state: boolean if entering d0 or d3 device state
1500 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1501 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1502 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1504 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1510 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
/* OEM-bit handling only exists on PCH and later MACs. */
1512 if (hw->mac.type < e1000_pchlan)
1515 ret_val = hw->phy.ops.acquire(hw);
/* On PCH, skip when HW itself will configure from NVM (OEM write enable
 * set) or SW config is not selected. */
1519 if (hw->mac.type == e1000_pchlan) {
1520 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1521 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1525 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
1526 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1529 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
1531 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1535 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
/* D0 path: mirror the D0 GbE-disable/LPLU bits from PHY_CTRL. */
1538 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1539 oem_reg |= HV_OEM_BITS_GBE_DIS;
1541 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1542 oem_reg |= HV_OEM_BITS_LPLU;
/* D3 path: either the D0 or non-D0 variant of each bit applies. */
1544 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1545 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1546 oem_reg |= HV_OEM_BITS_GBE_DIS;
1548 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1549 E1000_PHY_CTRL_NOND0A_LPLU))
1550 oem_reg |= HV_OEM_BITS_LPLU;
1553 /* Set Restart auto-neg to activate the bits */
1554 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1555 !hw->phy.ops.check_reset_block(hw))
1556 oem_reg |= HV_OEM_BITS_RESTART_AN;
1558 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1561 hw->phy.ops.release(hw);
1568 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1569 * @hw: pointer to the HW structure
1571 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1576 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
/* Read-modify-write HV_KMRN_MODE_CTRL to turn on the slow-MDIO bit. */
1578 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
1582 data |= HV_KMRN_MDIO_SLOW;
1584 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
1590 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1591 * done after every PHY reset.
1593 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1595 s32 ret_val = E1000_SUCCESS;
1598 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
/* These workarounds target the first-generation PCH MAC only. */
1600 if (hw->mac.type != e1000_pchlan)
1601 return E1000_SUCCESS;
1603 /* Set MDIO slow mode before any other MDIO access */
1604 if (hw->phy.type == e1000_phy_82577) {
1605 ret_val = e1000_set_mdio_slow_mode_hv(hw);
/* Early PHY revisions need preamble adjustments. */
1610 if (((hw->phy.type == e1000_phy_82577) &&
1611 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1612 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1613 /* Disable generation of early preamble */
1614 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
1618 /* Preamble tuning for SSC */
1619 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
1625 if (hw->phy.type == e1000_phy_82578) {
1626 /* Return registers to default by doing a soft reset then
1627 * writing 0x3140 to the control register.
1629 if (hw->phy.revision < 2) {
1630 e1000_phy_sw_reset_generic(hw);
1631 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
/* Reset the page select back to page 0 under the sw flag. */
1637 ret_val = hw->phy.ops.acquire(hw);
1642 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1643 hw->phy.ops.release(hw);
1647 /* Configure the K1 Si workaround during phy reset assuming there is
1648 * link so that it disables K1 if link is in 1Gbps.
1650 ret_val = e1000_k1_gig_workaround_hv(hw, true);
1654 /* Workaround for link disconnects on a busy hub in half duplex */
1655 ret_val = hw->phy.ops.acquire(hw);
1658 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1661 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
1666 /* set MSE higher to enable link to stay up when noise is high */
1667 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
1669 hw->phy.ops.release(hw);
1675 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1676 * @hw: pointer to the HW structure
1678 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1684 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
1686 ret_val = hw->phy.ops.acquire(hw);
/* Wakeup-register access must be enabled before touching BM_RAR_*. */
1689 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1693 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1694 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
/* Low 32 bits of the address go into the PHY's L/M halves ... */
1695 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
1696 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1697 (u16)(mac_reg & 0xFFFF));
1698 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1699 (u16)((mac_reg >> 16) & 0xFFFF));
/* ... and RAH supplies the high half plus the address-valid flag. */
1701 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
1702 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1703 (u16)(mac_reg & 0xFFFF));
1704 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1705 (u16)((mac_reg & E1000_RAH_AV)
1709 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1712 hw->phy.ops.release(hw);
/* e1000_calc_rx_da_crc - Compute the 802.3 CRC-32 of a 6-byte MAC address
 * @mac: the destination address bytes to hash
 *
 * Bitwise (reflected, poly 0xEDB88320) CRC over the 6 address octets;
 * used to seed the PCH_RAICC initial-CRC registers for the jumbo-frame
 * workaround below.
 */
1715 static u32 e1000_calc_rx_da_crc(u8 mac[])
1717 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
1718 u32 i, j, mask, crc;
1720 DEBUGFUNC("e1000_calc_rx_da_crc");
1723 for (i = 0; i < 6; i++) {
1725 for (j = 8; j > 0; j--) {
/* mask is all-ones when the LSB is set, selecting the poly XOR. */
1726 mask = (crc & 1) * (-1);
1727 crc = (crc >> 1) ^ (poly & mask);
1734 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1736 * @hw: pointer to the HW structure
1737 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1739 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1741 s32 ret_val = E1000_SUCCESS;
1746 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
/* Jumbo workaround applies only to 82579 (pch2lan). */
1748 if (hw->mac.type != e1000_pch2lan)
1749 return E1000_SUCCESS;
1751 /* disable Rx path while enabling/disabling workaround */
1752 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
1753 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
1754 phy_reg | (1 << 14));
1759 /* Write Rx addresses (rar_entry_count for RAL/H, and
1760 * SHRAL/H) and initial CRC values to the MAC
1762 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1763 u8 mac_addr[ETH_ADDR_LEN] = {0};
1764 u32 addr_high, addr_low;
/* Skip entries without the address-valid bit. */
1766 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
1767 if (!(addr_high & E1000_RAH_AV))
1769 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
1770 mac_addr[0] = (addr_low & 0xFF);
1771 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1772 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1773 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1774 mac_addr[4] = (addr_high & 0xFF);
1775 mac_addr[5] = ((addr_high >> 8) & 0xFF);
1777 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
1778 e1000_calc_rx_da_crc(mac_addr));
1781 /* Write Rx addresses to the PHY */
1782 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1784 /* Enable jumbo frame workaround in the MAC */
1785 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1786 mac_reg &= ~(1 << 14);
1787 mac_reg |= (7 << 15);
1788 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
/* Strip the Ethernet CRC on receive while the workaround is active. */
1790 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1791 mac_reg |= E1000_RCTL_SECRC;
1792 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1794 ret_val = e1000_read_kmrn_reg_generic(hw,
1795 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1799 ret_val = e1000_write_kmrn_reg_generic(hw,
1800 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1804 ret_val = e1000_read_kmrn_reg_generic(hw,
1805 E1000_KMRNCTRLSTA_HD_CTRL,
1809 data &= ~(0xF << 8);
1811 ret_val = e1000_write_kmrn_reg_generic(hw,
1812 E1000_KMRNCTRLSTA_HD_CTRL,
1817 /* Enable jumbo frame workaround in the PHY */
1818 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1819 data &= ~(0x7F << 5);
1820 data |= (0x37 << 5);
1821 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1824 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1826 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1829 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1830 data &= ~(0x3FF << 2);
1831 data |= (0x1A << 2);
1832 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1835 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
1838 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1839 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
/* Disable path: undo every MAC/PHY change made by the enable path. */
1844 /* Write MAC register values back to h/w defaults */
1845 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
1846 mac_reg &= ~(0xF << 14);
1847 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
1849 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
1850 mac_reg &= ~E1000_RCTL_SECRC;
1851 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
1853 ret_val = e1000_read_kmrn_reg_generic(hw,
1854 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1858 ret_val = e1000_write_kmrn_reg_generic(hw,
1859 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1863 ret_val = e1000_read_kmrn_reg_generic(hw,
1864 E1000_KMRNCTRLSTA_HD_CTRL,
1868 data &= ~(0xF << 8);
1870 ret_val = e1000_write_kmrn_reg_generic(hw,
1871 E1000_KMRNCTRLSTA_HD_CTRL,
1876 /* Write PHY register values back to h/w defaults */
1877 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
1878 data &= ~(0x7F << 5);
1879 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
1882 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
1884 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
1887 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
1888 data &= ~(0x3FF << 2);
1890 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
1893 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
1896 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
1897 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
1903 /* re-enable Rx path after enabling/disabling workaround */
1904 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
1909 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1910 * done after every PHY reset.
1912 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1914 s32 ret_val = E1000_SUCCESS;
1916 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
/* 82579 (pch2lan) only. */
1918 if (hw->mac.type != e1000_pch2lan)
1919 return E1000_SUCCESS;
1921 /* Set MDIO slow mode before any other MDIO access */
1922 ret_val = e1000_set_mdio_slow_mode_hv(hw)
1926 ret_val = hw->phy.ops.acquire(hw);
1929 /* set MSE higher to enable link to stay up when noise is high */
1930 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
1933 /* drop link after 5 times MSE threshold was reached */
1934 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
1936 hw->phy.ops.release(hw);
1942 * e1000_k1_gig_workaround_lv - K1 Si workaround
1943 * @hw: pointer to the HW structure
1945 * Workaround to set the K1 beacon duration for 82579 parts
1947 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1949 s32 ret_val = E1000_SUCCESS;
1954 DEBUGFUNC("e1000_k1_workaround_lv");
1956 if (hw->mac.type != e1000_pch2lan)
1957 return E1000_SUCCESS;
1959 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1960 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
/* Only act once link is up and autoneg has completed. */
1964 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1965 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1966 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1967 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1969 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
/* 1 Gb/s: 8 usec beacon, no forced PLL lock count, and keep the PLL
 * running in K1 (packet-drop workaround). */
1973 if (status_reg & HV_M_STATUS_SPEED_1000) {
1976 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1977 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1978 /* LV 1G Packet drop issue wa */
1979 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
1983 pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
1984 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
/* 10/100 Mb/s: 16 usec beacon with forced PLL lock count. */
1989 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1990 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1992 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1993 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
2000 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2001 * @hw: pointer to the HW structure
2002 * @gate: boolean set to true to gate, false to ungate
2004 * Gate/ungate the automatic PHY configuration via hardware; perform
2005 * the configuration via software instead.
2007 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2011 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
/* Gating only exists on 82579 (pch2lan). */
2013 if (hw->mac.type != e1000_pch2lan)
2016 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2019 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2021 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2023 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2027 * e1000_lan_init_done_ich8lan - Check for PHY config completion
2028 * @hw: pointer to the HW structure
2030 * Check the appropriate indication the MAC has finished configuring the
2031 * PHY after a software reset.
2033 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2035 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2037 DEBUGFUNC("e1000_lan_init_done_ich8lan");
2039 /* Wait for basic configuration completes before proceeding */
/* Poll STATUS.LAN_INIT_DONE until it sets or the loop count expires. */
2041 data = E1000_READ_REG(hw, E1000_STATUS);
2042 data &= E1000_STATUS_LAN_INIT_DONE;
2044 } while ((!data) && --loop);
2046 /* If basic configuration is incomplete before the above loop
2047 * count reaches 0, loading the configuration from NVM will
2048 * leave the PHY in a bad state possibly resulting in no link.
2051 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2053 /* Clear the Init Done bit for the next init event */
2054 data = E1000_READ_REG(hw, E1000_STATUS);
2055 data &= ~E1000_STATUS_LAN_INIT_DONE;
2056 E1000_WRITE_REG(hw, E1000_STATUS, data);
2060 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2061 * @hw: pointer to the HW structure
2063 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2065 s32 ret_val = E1000_SUCCESS;
2068 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
/* Nothing to do if firmware is blocking PHY resets. */
2070 if (hw->phy.ops.check_reset_block(hw))
2071 return E1000_SUCCESS;
2073 /* Allow time for h/w to get to quiescent state after reset */
2076 /* Perform any necessary post-reset workarounds */
2077 switch (hw->mac.type) {
2079 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2084 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2092 /* Clear the host wakeup bit after lcd reset */
2093 if (hw->mac.type >= e1000_pchlan) {
/* Fixed mojibake: the third argument had been corrupted to the
 * HTML entity for "&reg" (registered-trademark glyph); it must be
 * the address of the local "reg" variable. */
2094 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2095 reg &= ~BM_WUC_HOST_WU_BIT;
2096 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2099 /* Configure the LCD with the extended configuration region in NVM */
2100 ret_val = e1000_sw_lcd_config_ich8lan(hw);
2104 /* Configure the LCD with the OEM bits in NVM */
2105 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2107 if (hw->mac.type == e1000_pch2lan) {
2108 /* Ungate automatic PHY configuration on non-managed 82579 */
2109 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2110 E1000_ICH_FWSM_FW_VALID)) {
2112 e1000_gate_hw_phy_config_ich8lan(hw, false);
2115 /* Set EEE LPI Update Timer to 200usec */
2116 ret_val = hw->phy.ops.acquire(hw);
2119 ret_val = e1000_write_emi_reg_locked(hw,
2120 I82579_LPI_UPDATE_TIMER,
2122 hw->phy.ops.release(hw);
2129 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2130 * @hw: pointer to the HW structure
2133 * This is a function pointer entry point called by drivers
2134 * or other shared routines.
2136 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2138 s32 ret_val = E1000_SUCCESS;
2140 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2142 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2143 if ((hw->mac.type == e1000_pch2lan) &&
2144 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2145 e1000_gate_hw_phy_config_ich8lan(hw, true);
2147 ret_val = e1000_phy_hw_reset_generic(hw);
/* Run the device-specific post-reset sequence (workarounds, LCD/OEM
 * configuration). */
2151 return e1000_post_phy_reset_ich8lan(hw);
2155 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2156 * @hw: pointer to the HW structure
2157 * @active: true to enable LPLU, false to disable
2159 * Sets the LPLU state according to the active flag. For PCH, if OEM write
2160 * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2161 * the phy speed. This function will manually set the LPLU bit and restart
2162 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
2163 * since it configures the same bit.
2165 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2170 DEBUGFUNC("e1000_set_lplu_state_pchlan");
2172 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2177 oem_reg |= HV_OEM_BITS_LPLU;
2179 oem_reg &= ~HV_OEM_BITS_LPLU;
/* Restart autoneg to apply the change, unless firmware blocks resets. */
2181 if (!hw->phy.ops.check_reset_block(hw))
2182 oem_reg |= HV_OEM_BITS_RESTART_AN;
2184 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2188 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2189 * @hw: pointer to the HW structure
2190 * @active: true to enable LPLU, false to disable
2192 * Sets the LPLU D0 state according to the active flag. When
2193 * activating LPLU this function also disables smart speed
2194 * and vice versa. LPLU will not be activated unless the
2195 * device autonegotiation advertisement meets standards of
2196 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2197 * This is a function pointer entry point only called by
2198 * PHY setup routines.
2200 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2202 struct e1000_phy_info *phy = &hw->phy;
2204 s32 ret_val = E1000_SUCCESS;
2207 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
/* IFE PHYs have no D0 LPLU control to set. */
2209 if (phy->type == e1000_phy_ife)
2210 return E1000_SUCCESS;
2212 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Activation path: set the D0 LPLU bit in PHY_CTRL. */
2215 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2216 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2218 if (phy->type != e1000_phy_igp_3)
2219 return E1000_SUCCESS;
2221 /* Call gig speed drop workaround on LPLU before accessing
2224 if (hw->mac.type == e1000_ich8lan)
2225 e1000_gig_downshift_workaround_ich8lan(hw);
2227 /* When LPLU is enabled, we should disable SmartSpeed */
2228 ret_val = phy->ops.read_reg(hw,
2229 IGP01E1000_PHY_PORT_CONFIG,
2231 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2232 ret_val = phy->ops.write_reg(hw,
2233 IGP01E1000_PHY_PORT_CONFIG,
/* Deactivation path: clear the D0 LPLU bit ... */
2238 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2239 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2241 if (phy->type != e1000_phy_igp_3)
2242 return E1000_SUCCESS;
2244 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
2245 * during Dx states where the power conservation is most
2246 * important. During driver activity we should enable
2247 * SmartSpeed, so performance is maintained.
2249 if (phy->smart_speed == e1000_smart_speed_on) {
2250 ret_val = phy->ops.read_reg(hw,
2251 IGP01E1000_PHY_PORT_CONFIG,
2256 data |= IGP01E1000_PSCFR_SMART_SPEED;
2257 ret_val = phy->ops.write_reg(hw,
2258 IGP01E1000_PHY_PORT_CONFIG,
2262 } else if (phy->smart_speed == e1000_smart_speed_off) {
2263 ret_val = phy->ops.read_reg(hw,
2264 IGP01E1000_PHY_PORT_CONFIG,
2269 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2270 ret_val = phy->ops.write_reg(hw,
2271 IGP01E1000_PHY_PORT_CONFIG,
2278 return E1000_SUCCESS;
2282 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2283 * @hw: pointer to the HW structure
2284 * @active: true to enable LPLU, false to disable
2286 * Sets the LPLU D3 state according to the active flag. When
2287 * activating LPLU this function also disables smart speed
2288 * and vice versa. LPLU will not be activated unless the
2289 * device autonegotiation advertisement meets standards of
2290 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2291 * This is a function pointer entry point only called by
2292 * PHY setup routines.
2294 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2296 struct e1000_phy_info *phy = &hw->phy;
2298 s32 ret_val = E1000_SUCCESS;
2301 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
2303 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
/* Deactivation path: clear the non-D0 LPLU bit. */
2306 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2307 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2309 if (phy->type != e1000_phy_igp_3)
2310 return E1000_SUCCESS;
2312 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
2313 * during Dx states where the power conservation is most
2314 * important. During driver activity we should enable
2315 * SmartSpeed, so performance is maintained.
2317 if (phy->smart_speed == e1000_smart_speed_on) {
2318 ret_val = phy->ops.read_reg(hw,
2319 IGP01E1000_PHY_PORT_CONFIG,
2324 data |= IGP01E1000_PSCFR_SMART_SPEED;
2325 ret_val = phy->ops.write_reg(hw,
2326 IGP01E1000_PHY_PORT_CONFIG,
2330 } else if (phy->smart_speed == e1000_smart_speed_off) {
2331 ret_val = phy->ops.read_reg(hw,
2332 IGP01E1000_PHY_PORT_CONFIG,
2337 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2338 ret_val = phy->ops.write_reg(hw,
2339 IGP01E1000_PHY_PORT_CONFIG,
/* Activation path: only engage LPLU when the advertisement covers one
 * of the qualifying speed/duplex sets. */
2344 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2345 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2346 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2347 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2348 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
2350 if (phy->type != e1000_phy_igp_3)
2351 return E1000_SUCCESS;
2353 /* Call gig speed drop workaround on LPLU before accessing
2356 if (hw->mac.type == e1000_ich8lan)
2357 e1000_gig_downshift_workaround_ich8lan(hw);
2359 /* When LPLU is enabled, we should disable SmartSpeed */
2360 ret_val = phy->ops.read_reg(hw,
2361 IGP01E1000_PHY_PORT_CONFIG,
2366 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2367 ret_val = phy->ops.write_reg(hw,
2368 IGP01E1000_PHY_PORT_CONFIG,
2376 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2377 * @hw: pointer to the HW structure
2378 * @bank: pointer to the variable that returns the active bank
2380 * Reads signature byte from the NVM using the flash access registers.
2381 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2383 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2386 struct e1000_nvm_info *nvm = &hw->nvm;
/* Bank 1 starts one flash-bank (in bytes) past bank 0. */
2387 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
/* Signature lives in the high byte of word E1000_ICH_NVM_SIG_WORD. */
2388 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2392 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
2394 switch (hw->mac.type) {
/* Prefer the EECD bank-valid indication when hardware provides it. */
2397 eecd = E1000_READ_REG(hw, E1000_EECD);
2398 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2399 E1000_EECD_SEC1VAL_VALID_MASK) {
2400 if (eecd & E1000_EECD_SEC1VAL)
2405 return E1000_SUCCESS;
2407 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2410 /* set bank to 0 in case flash read fails */
/* Fall back to reading each bank's signature byte directly. */
2414 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2418 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2419 E1000_ICH_NVM_SIG_VALUE) {
2421 return E1000_SUCCESS;
2425 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2430 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2431 E1000_ICH_NVM_SIG_VALUE) {
2433 return E1000_SUCCESS;
2436 DEBUGOUT("ERROR: No valid NVM bank present\n");
2437 return -E1000_ERR_NVM;
2442 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2443 * @hw: pointer to the HW structure
2444 * @offset: The offset (in bytes) of the word(s) to read.
2445 * @words: Size of data to read in words
2446 * @data: Pointer to the word(s) to read at offset.
2448 * Reads a word(s) from the NVM using the flash access registers.
2450 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2453 struct e1000_nvm_info *nvm = &hw->nvm;
2454 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2456 s32 ret_val = E1000_SUCCESS;
2460 DEBUGFUNC("e1000_read_nvm_ich8lan");
/* Reject reads outside the NVM word space. */
2462 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2464 DEBUGOUT("nvm parameter(s) out of bounds\n");
2465 ret_val = -E1000_ERR_NVM;
2469 nvm->ops.acquire(hw);
2471 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2472 if (ret_val != E1000_SUCCESS) {
2473 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* Bank 1 reads are offset by one flash-bank size. */
2477 act_offset = (bank) ? nvm->flash_bank_size : 0;
2478 act_offset += offset;
2480 ret_val = E1000_SUCCESS;
2481 for (i = 0; i < words; i++) {
/* Shadow-RAM entries modified by a pending write take precedence
 * over the flash contents. */
2482 if (dev_spec->shadow_ram[offset+i].modified) {
2483 data[i] = dev_spec->shadow_ram[offset+i].value;
2485 ret_val = e1000_read_flash_word_ich8lan(hw,
2494 nvm->ops.release(hw);
2498 DEBUGOUT1("NVM read error: %d\n", ret_val);
2504 * e1000_flash_cycle_init_ich8lan - Initialize flash
2505 * @hw: pointer to the HW structure
2507 * This function does initial flash setup so that a new read/write/erase cycle
2510 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2512 union ich8_hws_flash_status hsfsts;
2513 s32 ret_val = -E1000_ERR_NVM;
2515 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
2517 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2519 /* Check if the flash descriptor is valid */
2520 if (!hsfsts.hsf_status.fldesvalid) {
2521 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
2522 return -E1000_ERR_NVM;
2525 /* Clear FCERR and DAEL in hw status by writing 1 */
/* These status bits are write-1-to-clear, hence the assignments below. */
2526 hsfsts.hsf_status.flcerr = 1;
2527 hsfsts.hsf_status.dael = 1;
2529 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2531 /* Either we should have a hardware SPI cycle in progress
2532 * bit to check against, in order to start a new cycle or
2533 * FDONE bit should be changed in the hardware so that it
2534 * is 1 after hardware reset, which can then be used as an
2535 * indication whether a cycle is in progress or has been
2539 if (!hsfsts.hsf_status.flcinprog) {
2540 /* There is no cycle running at present,
2541 * so we can start a cycle.
2542 * Begin by setting Flash Cycle Done.
2544 hsfsts.hsf_status.flcdone = 1;
2545 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
2546 ret_val = E1000_SUCCESS;
2550 /* Otherwise poll for sometime so the current
2551 * cycle has a chance to end before giving up.
2553 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2554 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
/* Prior cycle finished - we may claim the controller. */
2556 if (!hsfsts.hsf_status.flcinprog) {
2557 ret_val = E1000_SUCCESS;
2562 if (ret_val == E1000_SUCCESS) {
2563 /* Successful in waiting for previous cycle to timeout,
2564 * now set the Flash Cycle Done.
2566 hsfsts.hsf_status.flcdone = 1;
2567 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
/* Controller stayed busy for the whole timeout; give up. */
2570 DEBUGOUT("Flash controller busy, cannot get access\n");
2578 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2579 * @hw: pointer to the HW structure
2580 * @timeout: maximum time to wait for completion
2582 * This function starts a flash cycle and waits for its completion.
2584 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2586 union ich8_hws_flash_ctrl hsflctl;
2587 union ich8_hws_flash_status hsfsts;
2590 DEBUGFUNC("e1000_flash_cycle_ich8lan");
2592 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2593 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2594 hsflctl.hsf_ctrl.flcgo = 1;
2595 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
2597 /* wait till FDONE bit is set to 1 */
2599 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
2600 if (hsfsts.hsf_status.flcdone)
2603 } while (i++ < timeout);
/* Success only if the cycle completed AND no flash cycle error latched. */
2605 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
2606 return E1000_SUCCESS;
2608 return -E1000_ERR_NVM;
2612 * e1000_read_flash_word_ich8lan - Read word from flash
2613 * @hw: pointer to the HW structure
2614 * @offset: offset to data location
2615 * @data: pointer to the location for storing the data
2617 * Reads the flash word at offset into data. Offset is converted
2618 * to bytes before read.
2620 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2623 DEBUGFUNC("e1000_read_flash_word_ich8lan");
2626 return -E1000_ERR_NVM;
2628 /* Must convert offset into bytes. */
/* Delegate to the common byte/word reader with size = 2 (one word). */
2631 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2635 * e1000_read_flash_byte_ich8lan - Read byte from flash
2636 * @hw: pointer to the HW structure
2637 * @offset: The offset of the byte to read.
2638 * @data: Pointer to a byte to store the value read.
2640 * Reads a single byte from the NVM using the flash access registers.
2642 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* Read one byte via the common reader; result arrives in a u16 temp. */
2648 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2654 return E1000_SUCCESS;
2658 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2659 * @hw: pointer to the HW structure
2660 * @offset: The offset (in bytes) of the byte or word to read.
2661 * @size: Size of data to read, 1=byte 2=word
2662 * @data: Pointer to the word to store the value read.
2664 * Reads a byte or word from the NVM using the flash access registers.
2666 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2669 union ich8_hws_flash_status hsfsts;
2670 union ich8_hws_flash_ctrl hsflctl;
2671 u32 flash_linear_addr;
2673 s32 ret_val = -E1000_ERR_NVM;
2676 DEBUGFUNC("e1000_read_flash_data_ich8lan");
/* Only 1- or 2-byte accesses within the linear address window allowed. */
2678 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2679 return -E1000_ERR_NVM;
/* Translate NVM-relative offset into the flash part's linear address. */
2681 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2682 hw->nvm.flash_base_addr);
2687 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2688 if (ret_val != E1000_SUCCESS)
2691 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
2692 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2693 hsflctl.hsf_ctrl.fldbcount = size - 1;
2694 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2695 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* Program the target address, then kick off the read cycle. */
2697 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
2700 e1000_flash_cycle_ich8lan(hw,
2701 ICH_FLASH_READ_COMMAND_TIMEOUT);
2703 /* Check if FCERR is set to 1, if set to 1, clear it
2704 * and try the whole sequence a few more times, else
2705 * read in (shift in) the Flash Data0, the order is
2706 * least significant byte first msb to lsb
2708 if (ret_val == E1000_SUCCESS) {
2709 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
/* Mask the data register down to the requested access width. */
2711 *data = (u8)(flash_data & 0x000000FF);
2713 *data = (u16)(flash_data & 0x0000FFFF);
2716 /* If we've gotten here, then things are probably
2717 * completely hosed, but if the error condition is
2718 * detected, it won't hurt to give it another try...
2719 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2721 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
2723 if (hsfsts.hsf_status.flcerr) {
2724 /* Repeat for some time before giving up. */
/* No error and no completion: a genuine timeout - stop retrying. */
2726 } else if (!hsfsts.hsf_status.flcdone) {
2727 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
2731 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2737 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2738 * @hw: pointer to the HW structure
2739 * @offset: The offset (in bytes) of the word(s) to write.
2740 * @words: Size of data to write in words
2741 * @data: Pointer to the word(s) to write at offset.
2743 * Writes a byte or word to the NVM using the flash access registers.
2745 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2748 struct e1000_nvm_info *nvm = &hw->nvm;
2749 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2752 DEBUGFUNC("e1000_write_nvm_ich8lan");
/* Reject writes that start or run past the end of the NVM word space. */
2754 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2756 DEBUGOUT("nvm parameter(s) out of bounds\n");
2757 return -E1000_ERR_NVM;
2760 nvm->ops.acquire(hw);
/* Writes only update the in-memory shadow RAM here; the actual flash
 * commit happens later in e1000_update_nvm_checksum_ich8lan().
 */
2762 for (i = 0; i < words; i++) {
2763 dev_spec->shadow_ram[offset+i].modified = true;
2764 dev_spec->shadow_ram[offset+i].value = data[i];
2767 nvm->ops.release(hw);
2769 return E1000_SUCCESS;
2773 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2774 * @hw: pointer to the HW structure
2776 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2777 * which writes the checksum to the shadow ram. The changes in the shadow
2778 * ram are then committed to the EEPROM by processing each bank at a time
2779 * checking for the modified bit and writing only the pending changes.
2780 * After a successful commit, the shadow ram is cleared and is ready for
2783 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2785 struct e1000_nvm_info *nvm = &hw->nvm;
2786 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2787 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2791 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
/* First recompute the checksum word in shadow RAM. */
2793 ret_val = e1000_update_nvm_checksum_generic(hw);
/* Only software-managed flash NVM needs the bank-commit dance below. */
2797 if (nvm->type != e1000_nvm_flash_sw)
2800 nvm->ops.acquire(hw);
2802 /* We're writing to the opposite bank so if we're on bank 1,
2803 * write to bank 0 etc. We also need to erase the segment that
2804 * is going to be written
2806 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2807 if (ret_val != E1000_SUCCESS) {
2808 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
/* Active bank 0 -> commit into bank 1 (and vice versa). */
2813 new_bank_offset = nvm->flash_bank_size;
2814 old_bank_offset = 0;
2815 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2819 old_bank_offset = nvm->flash_bank_size;
2820 new_bank_offset = 0;
2821 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2826 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2827 /* Determine whether to write the value stored
2828 * in the other NVM bank or a modified value stored
2831 if (dev_spec->shadow_ram[i].modified) {
2832 data = dev_spec->shadow_ram[i].value;
/* Unmodified word: carry it over from the currently valid bank. */
2834 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2841 /* If the word is 0x13, then make sure the signature bits
2842 * (15:14) are 11b until the commit has completed.
2843 * This will allow us to write 10b which indicates the
2844 * signature is valid. We want to do this after the write
2845 * has completed so that we don't mark the segment valid
2846 * while the write is still in progress
2848 if (i == E1000_ICH_NVM_SIG_WORD)
2849 data |= E1000_ICH_NVM_SIG_MASK;
2851 /* Convert offset to bytes. */
2852 act_offset = (i + new_bank_offset) << 1;
2855 /* Write the bytes to the new bank. */
2856 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
/* Second byte of the word (odd byte address). */
2863 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2870 /* Don't bother writing the segment valid bits if sector
2871 * programming failed.
2874 DEBUGOUT("Flash commit failed.\n");
2878 /* Finally validate the new segment by setting bit 15:14
2879 * to 10b in word 0x13 , this can be done without an
2880 * erase as well since these bits are 11 to start with
2881 * and we need to change bit 14 to 0b
2883 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2884 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2889 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2895 /* And invalidate the previously valid segment by setting
2896 * its signature word (0x13) high_byte to 0b. This can be
2897 * done without an erase because flash erase sets all bits
2898 * to 1's. We can write 1's to 0's without an erase
2900 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2901 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2905 /* Great! Everything worked, we can now clear the cached entries. */
2906 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
2907 dev_spec->shadow_ram[i].modified = false;
/* 0xFFFF matches erased-flash state for cleared cache entries. */
2908 dev_spec->shadow_ram[i].value = 0xFFFF;
2912 nvm->ops.release(hw);
2914 /* Reload the EEPROM, or else modifications will not appear
2915 * until after the next adapter reset.
2918 nvm->ops.reload(hw);
2924 DEBUGOUT1("NVM update error: %d\n", ret_val);
2930 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2931 * @hw: pointer to the HW structure
2933 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2934 * If the bit is 0, that the EEPROM had been modified, but the checksum was not
2935 * calculated, in which case we need to calculate the checksum and set bit 6.
2937 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2942 u16 valid_csum_mask;
2944 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
2946 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
2947 * the checksum needs to be fixed. This bit is an indication that
2948 * the NVM was prepared by OEM software and did not calculate
2949 * the checksum...a likely scenario.
2951 switch (hw->mac.type) {
/* Which NVM word/bit carries the "checksum valid" flag is MAC-specific. */
2953 word = NVM_FUTURE_INIT_WORD1;
2954 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
2958 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
/* Flag clear: set it, rewrite the word, and commit a fresh checksum. */
2962 if (!(data & valid_csum_mask)) {
2963 data |= valid_csum_mask;
2964 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
2967 ret_val = hw->nvm.ops.update(hw);
/* Now perform the ordinary generic checksum validation. */
2972 return e1000_validate_nvm_checksum_generic(hw);
2976 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2977 * @hw: pointer to the HW structure
2978 * @offset: The offset (in bytes) of the byte/word to read.
2979 * @size: Size of data to read, 1=byte 2=word
2980 * @data: The byte(s) to write to the NVM.
2982 * Writes one/two bytes to the NVM using the flash access registers.
2984 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2987 union ich8_hws_flash_status hsfsts;
2988 union ich8_hws_flash_ctrl hsflctl;
2989 u32 flash_linear_addr;
2994 DEBUGFUNC("e1000_write_ich8_data");
/* Validate access width, that data fits in it, and the address range.
 * NOTE(review): "data > size * 0xff" rejects word values > 0x1FE for
 * size==2 rather than > 0xFFFF - appears intentional upstream; confirm.
 */
2996 if (size < 1 || size > 2 || data > size * 0xff ||
2997 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2998 return -E1000_ERR_NVM;
3000 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3001 hw->nvm.flash_base_addr);
3006 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3007 if (ret_val != E1000_SUCCESS)
3010 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3011 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3012 hsflctl.hsf_ctrl.fldbcount = size - 1;
3013 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3014 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3016 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
/* Stage the outgoing byte/word in the flash data register. */
3019 flash_data = (u32)data & 0x00FF;
3021 flash_data = (u32)data;
3023 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3025 /* check if FCERR is set to 1 , if set to 1, clear it
3026 * and try the whole sequence a few more times else done
3029 e1000_flash_cycle_ich8lan(hw,
3030 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3031 if (ret_val == E1000_SUCCESS)
3034 /* If we're here, then things are most likely
3035 * completely hosed, but if the error condition
3036 * is detected, it won't hurt to give it another
3037 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3039 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3040 if (hsfsts.hsf_status.flcerr)
3041 /* Repeat for some time before giving up. */
/* No error and no completion: a genuine timeout - stop retrying. */
3043 if (!hsfsts.hsf_status.flcdone) {
3044 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3047 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3053 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3054 * @hw: pointer to the HW structure
3055 * @offset: The index of the byte to read.
3056 * @data: The byte to write to the NVM.
3058 * Writes a single byte to the NVM using the flash access registers.
3060 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
/* Widen to u16 because the common writer takes word-sized data. */
3063 u16 word = (u16)data;
3065 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3067 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3071 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3072 * @hw: pointer to the HW structure
3073 * @offset: The offset of the byte to write.
3074 * @byte: The byte to write to the NVM.
3076 * Writes a single byte to the NVM using the flash access registers.
3077 * Goes through a retry algorithm before giving up.
3079 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3080 u32 offset, u8 byte)
3083 u16 program_retries;
3085 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
/* First attempt; the retry loop below only runs if this fails. */
3087 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
/* Retry the byte program up to 100 times before declaring failure. */
3091 for (program_retries = 0; program_retries < 100; program_retries++) {
3092 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3094 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3095 if (ret_val == E1000_SUCCESS)
/* Loop exhausted without a successful write. */
3098 if (program_retries == 100)
3099 return -E1000_ERR_NVM;
3101 return E1000_SUCCESS;
3105 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3106 * @hw: pointer to the HW structure
3107 * @bank: 0 for first bank, 1 for second bank, etc.
3109 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3110 * bank N is 4096 * N + flash_reg_addr.
3112 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3114 struct e1000_nvm_info *nvm = &hw->nvm;
3115 union ich8_hws_flash_status hsfsts;
3116 union ich8_hws_flash_ctrl hsflctl;
3117 u32 flash_linear_addr;
3118 /* bank size is in 16bit words - adjust to bytes */
3119 u32 flash_bank_size = nvm->flash_bank_size * 2;
3122 s32 j, iteration, sector_size;
3124 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3126 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3128 /* Determine HW Sector size: Read BERASE bits of hw flash status
3130 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3131 * consecutive sectors. The start index for the nth Hw sector
3132 * can be calculated as = bank * 4096 + n * 256
3133 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3134 * The start index for the nth Hw sector can be calculated
3136 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3137 * (ich9 only, otherwise error condition)
3138 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3140 switch (hsfsts.hsf_status.berasesz) {
3142 /* Hw sector size 256 */
3143 sector_size = ICH_FLASH_SEG_SIZE_256;
/* Small sectors: several erase cycles are needed to cover the bank. */
3144 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3147 sector_size = ICH_FLASH_SEG_SIZE_4K;
3151 sector_size = ICH_FLASH_SEG_SIZE_8K;
3155 sector_size = ICH_FLASH_SEG_SIZE_64K;
/* Unknown BERASE encoding - cannot erase safely. */
3159 return -E1000_ERR_NVM;
3162 /* Start with the base address, then add the sector offset. */
3163 flash_linear_addr = hw->nvm.flash_base_addr;
3164 flash_linear_addr += (bank) ? flash_bank_size : 0;
3166 for (j = 0; j < iteration; j++) {
3168 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
/* Each sector erase retries its own init/cycle sequence. */
3171 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3175 /* Write a value 11 (block Erase) in Flash
3176 * Cycle field in hw flash control
3178 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
3180 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3181 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3184 /* Write the last 24 bits of an index within the
3185 * block into Flash Linear address field in Flash
3188 flash_linear_addr += (j * sector_size);
3189 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3192 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3193 if (ret_val == E1000_SUCCESS)
3196 /* Check if FCERR is set to 1. If 1,
3197 * clear it and try the whole sequence
3198 * a few more times else Done
3200 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3202 if (hsfsts.hsf_status.flcerr)
3203 /* repeat for some time before giving up */
/* No error but no completion either: give up on this sector. */
3205 else if (!hsfsts.hsf_status.flcdone)
3207 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
3210 return E1000_SUCCESS;
3214 * e1000_valid_led_default_ich8lan - Set the default LED settings
3215 * @hw: pointer to the HW structure
3216 * @data: Pointer to the LED settings
3218 * Reads the LED default settings from the NVM to data. If the NVM LED
3219 * settings is all 0's or F's, set the LED default to a valid LED default
3222 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
3226 DEBUGFUNC("e1000_valid_led_default_ich8lan");
3228 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
3230 DEBUGOUT("NVM Read Error\n");
/* All-zeros / all-ones means the NVM was never programmed with LED
 * settings; substitute the driver's ICH8LAN default instead.
 */
3234 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
3235 *data = ID_LED_DEFAULT_ICH8LAN;
3237 return E1000_SUCCESS;
3241 * e1000_id_led_init_pchlan - store LED configurations
3242 * @hw: pointer to the HW structure
3244 * PCH does not control LEDs via the LEDCTL register, rather it uses
3245 * the PHY LED configuration register.
3247 * PCH also does not have an "always on" or "always off" mode which
3248 * complicates the ID feature. Instead of using the "on" mode to indicate
3249 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
3250 * use "link_up" mode. The LEDs will still ID on request if there is no
3251 * link based on logic in e1000_led_[on|off]_pchlan().
3253 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
3255 struct e1000_mac_info *mac = &hw->mac;
/* "off" = link_up mode with output inverted (IVRT); see header comment. */
3257 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
3258 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
3259 u16 data, i, temp, shift;
3261 DEBUGFUNC("e1000_id_led_init_pchlan");
3263 /* Get default ID LED modes */
3264 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
/* Start both ID modes from the current LEDCTL register contents. */
3268 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
3269 mac->ledctl_mode1 = mac->ledctl_default;
3270 mac->ledctl_mode2 = mac->ledctl_default;
/* Each of the 4 LEDs has a 4-bit mode field in the NVM word. */
3272 for (i = 0; i < 4; i++) {
3273 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
/* mode1: program LEDs whose NVM default asks for ON1/OFF1. */
3276 case ID_LED_ON1_DEF2:
3277 case ID_LED_ON1_ON2:
3278 case ID_LED_ON1_OFF2:
3279 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3280 mac->ledctl_mode1 |= (ledctl_on << shift);
3282 case ID_LED_OFF1_DEF2:
3283 case ID_LED_OFF1_ON2:
3284 case ID_LED_OFF1_OFF2:
3285 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
3286 mac->ledctl_mode1 |= (ledctl_off << shift);
/* mode2: program LEDs whose NVM default asks for ON2/OFF2. */
3293 case ID_LED_DEF1_ON2:
3294 case ID_LED_ON1_ON2:
3295 case ID_LED_OFF1_ON2:
3296 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3297 mac->ledctl_mode2 |= (ledctl_on << shift);
3299 case ID_LED_DEF1_OFF2:
3300 case ID_LED_ON1_OFF2:
3301 case ID_LED_OFF1_OFF2:
3302 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3303 mac->ledctl_mode2 |= (ledctl_off << shift);
3311 return E1000_SUCCESS;
3315 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3316 * @hw: pointer to the HW structure
3318 * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
3319 * register, so the the bus width is hard coded.
3321 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3323 struct e1000_bus_info *bus = &hw->bus;
3326 DEBUGFUNC("e1000_get_bus_info_ich8lan");
3328 ret_val = e1000_get_bus_info_pcie_generic(hw);
3330 /* ICH devices are "PCI Express"-ish. They have
3331 * a configuration space, but do not contain
3332 * PCI Express Capability registers, so bus width
3333 * must be hardcoded.
3335 if (bus->width == e1000_bus_width_unknown)
3336 bus->width = e1000_bus_width_pcie_x1;
3342 * e1000_reset_hw_ich8lan - Reset the hardware
3343 * @hw: pointer to the HW structure
3345 * Does a full reset of the hardware which includes a reset of the PHY and
3348 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3350 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3355 DEBUGFUNC("e1000_reset_hw_ich8lan");
3357 /* Prevent the PCI-E bus from sticking if there is no TLP connection
3358 * on the last TLP read/write transaction when MAC is reset.
3360 ret_val = e1000_disable_pcie_master_generic(hw);
/* Failure here is logged but not fatal; reset proceeds regardless. */
3362 DEBUGOUT("PCI-E Master disable polling has failed.\n");
3364 DEBUGOUT("Masking off all interrupts\n");
3365 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3367 /* Disable the Transmit and Receive units. Then delay to allow
3368 * any pending transactions to complete before we hit the MAC
3369 * with the global reset.
3371 E1000_WRITE_REG(hw, E1000_RCTL, 0);
3372 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
3373 E1000_WRITE_FLUSH(hw);
3377 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3378 if (hw->mac.type == e1000_ich8lan) {
3379 /* Set Tx and Rx buffer allocation to 8k apiece. */
3380 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
3381 /* Set Packet Buffer Size to 16k. */
3382 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
3385 if (hw->mac.type == e1000_pchlan) {
3386 /* Save the NVM K1 bit setting*/
/* Preserved so post-reset PHY config can restore K1 power state. */
3387 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3391 if (kum_cfg & E1000_NVM_K1_ENABLE)
3392 dev_spec->nvm_k1_enabled = true;
3394 dev_spec->nvm_k1_enabled = false;
3397 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3399 if (!hw->phy.ops.check_reset_block(hw)) {
3400 /* Full-chip reset requires MAC and PHY reset at the same
3401 * time to make sure the interface between MAC and the
3402 * external PHY is reset.
3404 ctrl |= E1000_CTRL_PHY_RST;
3406 /* Gate automatic PHY configuration by hardware on
3409 if ((hw->mac.type == e1000_pch2lan) &&
3410 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3411 e1000_gate_hw_phy_config_ich8lan(hw, true);
3413 ret_val = e1000_acquire_swflag_ich8lan(hw);
3414 DEBUGOUT("Issuing a global reset to ich8lan\n");
3415 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
3416 /* cannot issue a flush here because it hangs the hardware */
3419 /* Set Phy Config Counter to 50msec */
3420 if (hw->mac.type == e1000_pch2lan) {
3421 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
3422 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3423 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3424 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
/* Release the software flag mutex taken for the reset sequence. */
3428 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
/* If the PHY was reset along with the MAC, finish PHY bring-up. */
3430 if (ctrl & E1000_CTRL_PHY_RST) {
3431 ret_val = hw->phy.ops.get_cfg_done(hw);
3435 ret_val = e1000_post_phy_reset_ich8lan(hw);
3440 /* For PCH, this write will make sure that any noise
3441 * will be detected as a CRC error and be dropped rather than show up
3442 * as a bad packet to the DMA engine.
3444 if (hw->mac.type == e1000_pchlan)
3445 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
/* Mask interrupts again and clear any that latched during reset. */
3447 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
3448 E1000_READ_REG(hw, E1000_ICR);
3450 reg = E1000_READ_REG(hw, E1000_KABGTXD);
3451 reg |= E1000_KABGTXD_BGSQLBIAS;
3452 E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
3454 return E1000_SUCCESS;
3458 * e1000_init_hw_ich8lan - Initialize the hardware
3459 * @hw: pointer to the HW structure
3461 * Prepares the hardware for transmit and receive by doing the following:
3462 * - initialize hardware bits
3463 * - initialize LED identification
3464 * - setup receive address registers
3465 * - setup flow control
3466 * - setup transmit descriptors
3467 * - clear statistics
3469 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3471 struct e1000_mac_info *mac = &hw->mac;
3472 u32 ctrl_ext, txdctl, snoop;
3476 DEBUGFUNC("e1000_init_hw_ich8lan");
3478 e1000_initialize_hw_bits_ich8lan(hw);
3480 /* Initialize identification LED */
3481 ret_val = mac->ops.id_led_init(hw);
3482 /* An error is not fatal and we should not stop init due to this */
3484 DEBUGOUT("Error initializing identification LED\n");
3486 /* Setup the receive address. */
3487 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
3489 /* Zero out the Multicast HASH table */
3490 DEBUGOUT("Zeroing the MTA\n");
3491 for (i = 0; i < mac->mta_reg_count; i++)
3492 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3494 /* The 82578 Rx buffer will stall if wakeup is enabled in host and
3495 * the ME. Disable wakeup by clearing the host wakeup bit.
3496 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3498 if (hw->phy.type == e1000_phy_82578) {
3499 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
3500 i &= ~BM_WUC_HOST_WU_BIT;
3501 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
3502 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3507 /* Setup link and flow control */
3508 ret_val = mac->ops.setup_link(hw);
3510 /* Set the transmit descriptor write-back policy for both queues */
/* Queue 0: full descriptor write-back with max prefetch threshold. */
3511 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
3512 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
3513 E1000_TXDCTL_FULL_TX_DESC_WB);
3514 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
3515 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
3516 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
/* Queue 1: same policy as queue 0. */
3517 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
3518 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
3519 E1000_TXDCTL_FULL_TX_DESC_WB);
3520 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
3521 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
3522 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
3524 /* ICH8 has opposite polarity of no_snoop bits.
3525 * By default, we should use snoop behavior.
3527 if (mac->type == e1000_ich8lan)
3528 snoop = PCIE_ICH8_SNOOP_ALL;
3530 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3531 e1000_set_pcie_no_snoop_generic(hw, snoop);
/* Disable relaxed-ordering for request completions. */
3533 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3534 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3535 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3537 /* Clear all of the statistics registers (clear on read). It is
3538 * important that we do this after we have tried to establish link
3539 * because the symbol error count will increment wildly if there
3542 e1000_clear_hw_cntrs_ich8lan(hw);
3548 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3549 * @hw: pointer to the HW structure
3551 * Sets/Clears required hardware bits necessary for correctly setting up the
3552 * hardware for transmit and receive.
3554 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3558 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
3560 /* Extended Device Control */
3561 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
3563 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3564 if (hw->mac.type >= e1000_pchlan)
3565 reg |= E1000_CTRL_EXT_PHYPDEN;
3566 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
3568 /* Transmit Descriptor Control 0 */
3569 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
3571 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
3573 /* Transmit Descriptor Control 1 */
3574 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
3576 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
3578 /* Transmit Arbitration Control 0 */
3579 reg = E1000_READ_REG(hw, E1000_TARC(0));
/* NOTE(review): magic TARC bit positions come from Intel hardware
 * errata/init requirements; kept as raw shifts to match the datasheet.
 */
3580 if (hw->mac.type == e1000_ich8lan)
3581 reg |= (1 << 28) | (1 << 29);
3582 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3583 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
3585 /* Transmit Arbitration Control 1 */
3586 reg = E1000_READ_REG(hw, E1000_TARC(1));
/* Bit choice depends on whether multiple Tx requests are enabled. */
3587 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
3591 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3592 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
3595 if (hw->mac.type == e1000_ich8lan) {
3596 reg = E1000_READ_REG(hw, E1000_STATUS);
3598 E1000_WRITE_REG(hw, E1000_STATUS, reg);
3601 /* work-around descriptor data corruption issue during nfs v2 udp
3602 * traffic, just disable the nfs filtering capability
3604 reg = E1000_READ_REG(hw, E1000_RFCTL);
3605 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3607 /* Disable IPv6 extension header parsing because some malformed
3608 * IPv6 headers can hang the Rx.
3610 if (hw->mac.type == e1000_ich8lan)
3611 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
3612 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
3618 * e1000_setup_link_ich8lan - Setup flow control and link settings
3619 * @hw: pointer to the HW structure
3621 * Determines which flow control settings to use, then configures flow
3622 * control. Calls the appropriate media-specific link configuration
3623 * function. Assuming the adapter has a valid link partner, a valid link
3624 * should be established. Assumes the hardware has previously been reset
3625 * and the transmitter and receiver are not enabled.
3627 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3631 DEBUGFUNC("e1000_setup_link_ich8lan");
/* If manageability firmware blocks PHY resets, leave link alone. */
3633 if (hw->phy.ops.check_reset_block(hw))
3634 return E1000_SUCCESS;
3636 /* ICH parts do not have a word in the NVM to determine
3637 * the default flow control setting, so we explicitly
3640 if (hw->fc.requested_mode == e1000_fc_default)
3641 hw->fc.requested_mode = e1000_fc_full;
3643 /* Save off the requested flow control mode for use later. Depending
3644 * on the link partner's capabilities, we may or may not use this mode.
3646 hw->fc.current_mode = hw->fc.requested_mode;
3648 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
3649 hw->fc.current_mode);
3651 /* Continue to configure the copper link. */
3652 ret_val = hw->mac.ops.setup_physical_interface(hw);
/* Program pause timer value, and refresh timer on PCH-class PHYs. */
3656 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
3657 if ((hw->phy.type == e1000_phy_82578) ||
3658 (hw->phy.type == e1000_phy_82579) ||
3659 (hw->phy.type == e1000_phy_i217) ||
3660 (hw->phy.type == e1000_phy_82577)) {
3661 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
3663 ret_val = hw->phy.ops.write_reg(hw,
3664 PHY_REG(BM_PORT_CTRL_PAGE, 27),
/* Finish by programming the flow-control watermark registers. */
3670 return e1000_set_fc_watermarks_generic(hw);
3674 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3675 * @hw: pointer to the HW structure
3677 * Configures the kumeran interface to the PHY to wait the appropriate time
3678 * when polling the PHY, then call the generic setup_copper_link to finish
3679 * configuring the copper link.
3681 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3687 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
/* Set Link Up (SLU) and let the PHY determine speed/duplex by
 * clearing the force-speed and force-duplex bits.
 */
3689 ctrl = E1000_READ_REG(hw, E1000_CTRL);
3690 ctrl |= E1000_CTRL_SLU;
3691 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3692 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3694 /* Set the mac to wait the maximum time between each iteration
3695 * and increase the max iterations when polling the phy;
3696 * this fixes erroneous timeouts at 10Mbps.
3698 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
/* Read-modify-write of the Kumeran inband-parameter register
 * (NOTE(review): the modification applied between the read and the
 * write is elided in this excerpt).
 */
3702 ret_val = e1000_read_kmrn_reg_generic(hw,
3703 E1000_KMRNCTRLSTA_INBAND_PARAM,
3708 ret_val = e1000_write_kmrn_reg_generic(hw,
3709 E1000_KMRNCTRLSTA_INBAND_PARAM,
/* Dispatch to the PHY-family-specific copper link setup. */
3714 switch (hw->phy.type) {
3715 case e1000_phy_igp_3:
3716 ret_val = e1000_copper_link_setup_igp(hw);
3721 case e1000_phy_82578:
3722 ret_val = e1000_copper_link_setup_m88(hw);
3726 case e1000_phy_82577:
3727 case e1000_phy_82579:
3728 case e1000_phy_i217:
3729 ret_val = e1000_copper_link_setup_82577(hw);
/* Manual MDI/MDI-X configuration via IFE_PHY_MDIX_CONTROL.
 * NOTE(review): the case label for this branch is elided in this
 * excerpt; presumably e1000_phy_ife -- confirm against full source.
 */
3734 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
3739 reg_data &= ~IFE_PMC_AUTO_MDIX;
3741 switch (hw->phy.mdix) {
3743 reg_data &= ~IFE_PMC_FORCE_MDIX;
3746 reg_data |= IFE_PMC_FORCE_MDIX;
3750 reg_data |= IFE_PMC_AUTO_MDIX;
3753 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
/* Finish with the generic copper-link configuration. */
3762 return e1000_setup_copper_link_generic(hw);
3766 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3767 * @hw: pointer to the HW structure
3768 * @speed: pointer to store current link speed
3769 * @duplex: pointer to store the current link duplex
3771 * Calls the generic get_speed_and_duplex to retrieve the current link
3772 * information and then calls the Kumeran lock loss workaround for links at
3775 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3780 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
3782 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
/* The Kumeran lock-loss workaround applies only to ICH8 with the
 * IGP3 PHY when the link came up at gigabit speed.
 */
3786 if ((hw->mac.type == e1000_ich8lan) &&
3787 (hw->phy.type == e1000_phy_igp_3) &&
3788 (*speed == SPEED_1000)) {
3789 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3796 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3797 * @hw: pointer to the HW structure
3799 * Work-around for 82566 Kumeran PCS lock loss:
3800 * On link status change (i.e. PCI reset, speed change) and link is up and
3802 * 0) if workaround is optionally disabled do nothing
3803 * 1) wait 1ms for Kumeran link to come up
3804 * 2) check Kumeran Diagnostic register PCS lock loss bit
3805 * 3) if not set the link is locked (all is good), otherwise...
3807 * 5) repeat up to 10 times
3808 * Note: this is only called for IGP3 copper when speed is 1gb.
3810 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3812 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3818 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
/* The workaround can be disabled at runtime via the dev_spec flag
 * (see e1000_set_kmrn_lock_loss_workaround_ich8lan).
 */
3820 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3821 return E1000_SUCCESS;
3823 /* Make sure link is up before proceeding. If not just return.
3824 * Attempting this while link is negotiating fouled up link
3827 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
3829 return E1000_SUCCESS;
/* Up to 10 attempts to observe a stable PCS lock. */
3831 for (i = 0; i < 10; i++) {
3832 /* read once to clear */
3833 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3836 /* and again to get new status */
3837 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
3841 /* check for PCS lock */
3842 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3843 return E1000_SUCCESS;
3845 /* Issue PHY reset */
3846 hw->phy.ops.reset(hw);
/* All attempts failed: give up on gigabit by setting the GbE
 * disable bits for both D0a and non-D0a power states.
 */
3849 /* Disable GigE link negotiation */
3850 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3851 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3852 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3853 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3855 /* Call gig speed drop workaround on Gig disable before accessing
3858 e1000_gig_downshift_workaround_ich8lan(hw);
3860 /* unable to acquire PCS lock */
3861 return -E1000_ERR_PHY;
3865 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3866 * @hw: pointer to the HW structure
3867 * @state: boolean value used to set the current Kumeran workaround state
3869 * If ICH8, set the current Kumeran workaround state (enabled - true
3870 * /disabled - false).
3872 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3875 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3877 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
/* The lock-loss workaround is only relevant on ICH8 silicon;
 * refuse to change the flag on anything else.
 */
3879 if (hw->mac.type != e1000_ich8lan) {
3880 DEBUGOUT("Workaround applies to ICH8 only.\n")
3884 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3890 * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3891 * @hw: pointer to the HW structure
3893 * Workaround for 82566 power-down on D3 entry:
3894 * 1) disable gigabit link
3895 * 2) write VR power-down enable
3897 * Continue if successful, else issue LCD reset and repeat
3899 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3905 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
/* Only the IGP3 PHY needs this power-down sequence. */
3907 if (hw->phy.type != e1000_phy_igp_3)
3910 /* Try the workaround twice (if needed) */
/* NOTE(review): the retry-loop header is elided in this excerpt;
 * the body below executes up to two iterations per the comment above.
 */
3913 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
3914 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3915 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3916 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
3918 /* Call gig speed drop workaround on Gig disable before
3919 * accessing any PHY registers
3921 if (hw->mac.type == e1000_ich8lan)
3922 e1000_gig_downshift_workaround_ich8lan(hw);
3924 /* Write VR power-down enable */
3925 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3926 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3927 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
3928 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3930 /* Read it back and test */
3931 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
3932 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
/* Done if shutdown mode latched, or if this was already the retry. */
3933 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3936 /* Issue PHY reset and repeat at most one more time */
3937 reg = E1000_READ_REG(hw, E1000_CTRL);
3938 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
3944 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3945 * @hw: pointer to the HW structure
3947 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
3948 * LPLU, Gig disable, MDIC PHY reset):
3949 * 1) Set Kumeran Near-end loopback
3950 * 2) Clear Kumeran Near-end loopback
3951 * Should only be called for ICH8[m] devices with any 1G Phy.
3953 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3958 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
/* Applies only to ICH8 MACs, and never to the IFE PHY. */
3960 if ((hw->mac.type != e1000_ich8lan) ||
3961 (hw->phy.type == e1000_phy_ife))
/* Pulse Kumeran near-end loopback: read the diag register, write it
 * back with NELPBK set, then write it again with NELPBK cleared.
 */
3964 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
3968 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
3969 ret_val = e1000_write_kmrn_reg_generic(hw,
3970 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3974 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
3975 ret_val = e1000_write_kmrn_reg_generic(hw,
3976 E1000_KMRNCTRLSTA_DIAG_OFFSET,
3981 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
3982 * @hw: pointer to the HW structure
3984 * During S0 to Sx transition, it is possible the link remains at gig
3985 * instead of negotiating to a lower speed. Before going to Sx, set
3986 * 'Gig Disable' to force link speed negotiation to a lower speed based on
3987 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
3988 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
3989 * needs to be written.
3990 * Parts that support (and are linked to a partner which support) EEE in
3991 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
3992 * than 10Mbps w/o EEE.
3994 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3996 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4000 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
/* Always disable gigabit before Sx entry. */
4002 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4003 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
/* i217 needs extra EEE and Intel Rapid Start handling, done while
 * holding the PHY semaphore.
 */
4005 if (hw->phy.type == e1000_phy_i217) {
4008 ret_val = hw->phy.ops.acquire(hw);
4012 if (!dev_spec->eee_disable) {
4016 e1000_read_emi_reg_locked(hw,
4017 I217_EEE_ADVERTISEMENT,
4022 /* Disable LPLU if both link partners support 100BaseT
4023 * EEE and 100Full is advertised on both ends of the
4026 if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4027 (dev_spec->eee_lp_ability &
4028 I82579_EEE_100_SUPPORTED) &&
4029 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
4030 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4031 E1000_PHY_CTRL_NOND0A_LPLU);
4034 /* For i217 Intel Rapid Start Technology support,
4035 * when the system is going into Sx and no manageability engine
4036 * is present, the driver must configure proxy to reset only on
4037 * power good. LPI (Low Power Idle) state must also reset only
4038 * on power good, as well as the MTA (Multicast table array).
4039 * The SMBus release must also be disabled on LCD reset.
4041 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4042 E1000_ICH_FWSM_FW_VALID)) {
4043 /* Enable proxy to reset only on power good. */
4044 hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4046 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4047 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4050 /* Set bit enable LPI (EEE) to reset only on
4053 hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4054 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4055 hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4057 /* Disable the SMB release on LCD reset. */
4058 hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4059 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4060 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4063 /* Enable MTA to reset for Intel Rapid Start Technology
4066 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4067 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4068 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4071 hw->phy.ops.release(hw);
/* Commit the gig-disable/LPLU decisions accumulated above. */
4074 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4076 if (hw->mac.type == e1000_ich8lan)
4077 e1000_gig_downshift_workaround_ich8lan(hw);
/* PCH and newer: program the OEM bits PHY register; 82577/8 need a
 * PHY reset to make those bits take effect.
 */
4079 if (hw->mac.type >= e1000_pchlan) {
4080 e1000_oem_bits_config_ich8lan(hw, false);
4082 /* Reset PHY to activate OEM bits on 82577/8 */
4083 if (hw->mac.type == e1000_pchlan)
4084 e1000_phy_hw_reset_generic(hw);
4086 ret_val = hw->phy.ops.acquire(hw);
4089 e1000_write_smbus_addr(hw);
4090 hw->phy.ops.release(hw);
4097 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4098 * @hw: pointer to the HW structure
4100 * During Sx to S0 transitions on non-managed devices or managed devices
4101 * on which PHY resets are not blocked, if the PHY registers cannot be
4102 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
4104 * On i217, setup Intel Rapid Start Technology.
4106 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4110 DEBUGFUNC("e1000_resume_workarounds_pchlan");
/* NOTE(review): early return for pre-PCH2 parts; the return
 * statement itself is elided in this excerpt.
 */
4112 if (hw->mac.type < e1000_pch2lan)
4115 ret_val = e1000_init_phy_workarounds_pchlan(hw);
4117 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4121 /* For i217 Intel Rapid Start Technology support when the system
4122 * is transitioning from Sx and no manageability engine is present
4123 * configure SMBus to restore on reset, disable proxy, and enable
4124 * the reset on MTA (Multicast table array).
4126 if (hw->phy.type == e1000_phy_i217) {
4129 ret_val = hw->phy.ops.acquire(hw);
4131 DEBUGOUT("Failed to setup iRST\n");
/* Only touch the i217 iRST registers when no firmware/ME owns
 * the PHY (FW_VALID clear in FWSM).
 */
4135 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4136 E1000_ICH_FWSM_FW_VALID)) {
4137 /* Restore clear on SMB if no manageability engine
4140 ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
4144 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4145 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
/* Disable the proxy entirely on resume. */
4148 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
4150 /* Enable reset on MTA */
4151 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
4155 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4156 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4159 DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
4160 hw->phy.ops.release(hw);
4165 * e1000_cleanup_led_ich8lan - Restore the default LED operation
4166 * @hw: pointer to the HW structure
4168 * Return the LED back to the default configuration.
4170 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
4172 DEBUGFUNC("e1000_cleanup_led_ich8lan");
/* IFE PHYs control LEDs through a PHY register rather than LEDCTL
 * (NOTE(review): the value written is elided in this excerpt).
 */
4174 if (hw->phy.type == e1000_phy_ife)
4175 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
/* All other PHYs: restore the MAC LEDCTL default value. */
4178 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
4179 return E1000_SUCCESS;
4183 * e1000_led_on_ich8lan - Turn LEDs on
4184 * @hw: pointer to the HW structure
4188 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
4190 DEBUGFUNC("e1000_led_on_ich8lan");
/* IFE PHY: force LEDs on via the PHY probe-mode control register. */
4192 if (hw->phy.type == e1000_phy_ife)
4193 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4194 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
/* Other PHYs: write the pre-computed LEDCTL "mode2" (LED-on) value. */
4196 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
4197 return E1000_SUCCESS;
4201 * e1000_led_off_ich8lan - Turn LEDs off
4202 * @hw: pointer to the HW structure
4204 * Turn off the LEDs.
4206 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
4208 DEBUGFUNC("e1000_led_off_ich8lan");
/* IFE PHY: force LEDs off via the PHY probe-mode control register. */
4210 if (hw->phy.type == e1000_phy_ife)
4211 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4212 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
/* Other PHYs: write the pre-computed LEDCTL "mode1" (LED-off) value. */
4214 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
4215 return E1000_SUCCESS;
4219 * e1000_setup_led_pchlan - Configures SW controllable LED
4220 * @hw: pointer to the HW structure
4222 * This prepares the SW controllable LED for use.
4224 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
4226 DEBUGFUNC("e1000_setup_led_pchlan");
/* On PCH parts the LED configuration lives in the PHY register
 * HV_LED_CONFIG, not the MAC LEDCTL register.
 */
4228 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4229 (u16)hw->mac.ledctl_mode1);
4233 * e1000_cleanup_led_pchlan - Restore the default LED operation
4234 * @hw: pointer to the HW structure
4236 * Return the LED back to the default configuration.
4238 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
4240 DEBUGFUNC("e1000_cleanup_led_pchlan");
/* Write the default LED configuration to the PHY-side register. */
4242 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
4243 (u16)hw->mac.ledctl_default);
4247 * e1000_led_on_pchlan - Turn LEDs on
4248 * @hw: pointer to the HW structure
4252 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4254 u16 data = (u16)hw->mac.ledctl_mode2;
4257 DEBUGFUNC("e1000_led_on_pchlan");
4259 /* If no link, then turn LED on by setting the invert bit
4260 * for each LED that's mode is "link_up" in ledctl_mode2.
4262 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Three LEDs, 5 configuration bits each, packed in `data`. */
4263 for (i = 0; i < 3; i++) {
4264 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4265 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4266 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a "link up" LED lights while down. */
4268 if (led & E1000_PHY_LED0_IVRT)
4269 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4271 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4275 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4279 * e1000_led_off_pchlan - Turn LEDs off
4280 * @hw: pointer to the HW structure
4282 * Turn off the LEDs.
4284 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4286 u16 data = (u16)hw->mac.ledctl_mode1;
4289 DEBUGFUNC("e1000_led_off_pchlan");
4291 /* If no link, then turn LED off by clearing the invert bit
4292 * for each LED that's mode is "link_up" in ledctl_mode1.
4294 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
/* Three LEDs, 5 configuration bits each, packed in `data`. */
4295 for (i = 0; i < 3; i++) {
4296 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
4297 if ((led & E1000_PHY_LED0_MODE_MASK) !=
4298 E1000_LEDCTL_MODE_LINK_UP)
/* Toggle the invert bit so a "link up" LED stays dark while down. */
4300 if (led & E1000_PHY_LED0_IVRT)
4301 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
4303 data |= (E1000_PHY_LED0_IVRT << (i * 5));
4307 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
4311 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4312 * @hw: pointer to the HW structure
4314 * Read appropriate register for the config done bit for completion status
4315 * and configure the PHY through s/w for EEPROM-less parts.
4317 * NOTE: some silicon which is EEPROM-less will fail trying to read the
4318 * config done bit, so only an error is logged and continues. If we were
4319 * to return with error, EEPROM-less silicon would not be able to be reset
4322 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4324 s32 ret_val = E1000_SUCCESS;
4328 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
4330 e1000_get_cfg_done_generic(hw);
4332 /* Wait for indication from h/w that it has completed basic config */
4333 if (hw->mac.type >= e1000_ich10lan) {
4334 e1000_lan_init_done_ich8lan(hw);
4336 ret_val = e1000_get_auto_rd_done_generic(hw);
4338 /* When auto config read does not complete, do not
4339 * return with an error. This can happen in situations
4340 * where there is no eeprom and prevents getting link.
4342 DEBUGOUT("Auto Read Done did not complete\n");
4343 ret_val = E1000_SUCCESS;
4347 /* Clear PHY Reset Asserted bit */
4348 status = E1000_READ_REG(hw, E1000_STATUS);
4349 if (status & E1000_STATUS_PHYRA)
4350 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
/* NOTE(review): this debug line appears to be the else-branch of the
 * PHYRA check; the `else` itself is elided in this excerpt.
 */
4352 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
4354 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
4355 if (hw->mac.type <= e1000_ich9lan) {
4356 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
4357 (hw->phy.type == e1000_phy_igp_3)) {
4358 e1000_phy_init_script_igp3(hw);
/* ICH10+ path: treat an undetectable NVM bank as "EEPROM missing". */
4361 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
4362 /* Maybe we should do a basic PHY config */
4363 DEBUGOUT("EEPROM not present\n");
4364 ret_val = -E1000_ERR_CONFIG;
4372 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
4373 * @hw: pointer to the HW structure
4375 * In the case of a PHY power down to save power, or to turn off link during a
4376 * driver unload, or wake on lan is not enabled, remove the link.
4378 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
4380 /* If the management interface is not enabled, then power down */
/* Power down only when neither manageability nor a reset block
 * requires the PHY to stay up.
 */
4381 if (!(hw->mac.ops.check_mng_mode(hw) ||
4382 hw->phy.ops.check_reset_block(hw)))
4383 e1000_power_down_phy_copper(hw);
4389 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
4390 * @hw: pointer to the HW structure
4392 * Clears hardware counters specific to the silicon family and calls
4393 * clear_hw_cntrs_generic to clear all general purpose counters.
4395 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
4400 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
4402 e1000_clear_hw_cntrs_base_generic(hw);
4404 E1000_READ_REG(hw, E1000_ALGNERRC);
4405 E1000_READ_REG(hw, E1000_RXERRC);
4406 E1000_READ_REG(hw, E1000_TNCRS);
4407 E1000_READ_REG(hw, E1000_CEXTERR);
4408 E1000_READ_REG(hw, E1000_TSCTC);
4409 E1000_READ_REG(hw, E1000_TSCTFC);
4411 E1000_READ_REG(hw, E1000_MGTPRC);
4412 E1000_READ_REG(hw, E1000_MGTPDC);
4413 E1000_READ_REG(hw, E1000_MGTPTC);
4415 E1000_READ_REG(hw, E1000_IAC);
4416 E1000_READ_REG(hw, E1000_ICRXOC);
4418 /* Clear PHY statistics registers */
4419 if ((hw->phy.type == e1000_phy_82578) ||
4420 (hw->phy.type == e1000_phy_82579) ||
4421 (hw->phy.type == e1000_phy_i217) ||
4422 (hw->phy.type == e1000_phy_82577)) {
4423 ret_val = hw->phy.ops.acquire(hw);
4426 ret_val = hw->phy.ops.set_page(hw,
4427 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4430 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4431 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4432 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4433 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4434 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4435 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4436 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4437 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4438 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4439 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4440 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4441 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4442 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4443 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4445 hw->phy.ops.release(hw);