/*******************************************************************************

Copyright (c) 2001-2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 * Ethernet Connection I217-LM
 * Ethernet Connection I217-V
 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 * Ethernet Connection (2) I218-LM
 * Ethernet Connection (2) I218-V
 * Ethernet Connection (3) I218-LM
 * Ethernet Connection (3) I218-V
 */

#include "e1000_api.h"

STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
                                              u8 *mc_addr_list,
                                              u32 mc_addr_count);
#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
                                            bool active);
STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
                                            bool active);
STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
                                   u16 words, u16 *data);
STATIC s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
                               u16 *data);
STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
                                    u16 words, u16 *data);
STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
                                            u16 *data);
STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
                                           u16 *speed, u16 *duplex);
STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
                                          u32 offset, u8 *data);
STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
                                          u8 size, u16 *data);
STATIC s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
                                            u32 *data);
STATIC s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
                                           u32 offset, u32 *data);
STATIC s32  e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
                                             u32 offset, u32 data);
STATIC s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
                                                  u32 offset, u32 dword);
STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
                                          u32 offset, u16 *data);
STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
                                                 u32 offset, u8 byte);
STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
        struct ich8_hsfsts {
                u16 flcdone:1; /* bit 0 Flash Cycle Done */
                u16 flcerr:1; /* bit 1 Flash Cycle Error */
                u16 dael:1; /* bit 2 Direct Access error Log */
                u16 berasesz:2; /* bit 4:3 Sector Erase Size */
                u16 flcinprog:1; /* bit 5 flash cycle in Progress */
                u16 reserved1:2; /* bits 7:6 Reserved */
                u16 reserved2:6; /* bits 13:8 Reserved */
                u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
                u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
        } hsf_status;
        u16 regval;
};

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
        struct ich8_hsflctl {
                u16 flcgo:1;   /* 0 Flash Cycle Go */
                u16 flcycle:2;   /* 2:1 Flash Cycle */
                u16 reserved:5;   /* 7:3 Reserved  */
                u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
                u16 flockdn:6;   /* 15:10 Reserved */
        } hsf_ctrl;
        u16 regval;
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
        struct ich8_flracc {
                u32 grra:8; /* 0:7 GbE region Read Access */
                u32 grwa:8; /* 8:15 GbE region Write Access */
                u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
                u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
        } hsf_flregacc;
        u16 regval;
};

/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
        u16 phy_reg = 0;
        u32 phy_id = 0;
        s32 ret_val = 0;
        u16 retry_count;
        u32 mac_reg = 0;

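        /* Try twice to read the PHY ID (PHY_ID1 high word, PHY_ID2 low
         * word); a failed read or a value of 0xFFFF means the PHY did not
         * respond on that attempt.
         */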
        for (retry_count = 0; retry_count < 2; retry_count++) {
                ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
                if (ret_val || (phy_reg == 0xFFFF))
                        continue;
                phy_id = (u32)(phy_reg << 16);

                ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
                if (ret_val || (phy_reg == 0xFFFF)) {
                        phy_id = 0;
                        continue;
                }
                phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
                break;
        }

        if (hw->phy.id) {
                if (hw->phy.id == phy_id)
                        goto out;
        } else if (phy_id) {
                hw->phy.id = phy_id;
                hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
                goto out;
        }

        /* In case the PHY needs to be in mdio slow mode,
         * set slow mode and try to get the PHY id again.
         */
        if (hw->mac.type < e1000_pch_lpt) {
                hw->phy.ops.release(hw);
                ret_val = e1000_set_mdio_slow_mode_hv(hw);
                if (!ret_val)
                        ret_val = e1000_get_phy_id(hw);
                hw->phy.ops.acquire(hw);
        }

        if (ret_val)
                return false;
out:
        if (hw->mac.type >= e1000_pch_lpt) {
                /* Only unforce SMBus if ME is not active */
                if (!(E1000_READ_REG(hw, E1000_FWSM) &
                    E1000_ICH_FWSM_FW_VALID)) {
                        /* Unforce SMBus mode in PHY */
                        hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
                        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
                        hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

                        /* Unforce SMBus mode in MAC */
                        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
                        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
                }
        }

        return true;
}

/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
        u32 mac_reg;

        DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

        /* Set Phy Config Counter to 50msec */
        mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
        mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
        mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
        E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

        /* Toggle LANPHYPC Value bit */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL);
        mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
        mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
        E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
        E1000_WRITE_FLUSH(hw);
        msec_delay(1);
        mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
        E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
        E1000_WRITE_FLUSH(hw);

        if (hw->mac.type < e1000_pch_lpt) {
                msec_delay(50);
        } else {
                u16 count = 20;

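                /* Poll up to ~100 ms (20 x 5 ms) for CTRL_EXT.LPCD to be set
                 * before the final settle delay.
                 */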
                do {
                        msec_delay(5);
                } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
                           E1000_CTRL_EXT_LPCD) && count--);

                msec_delay(30);
        }
}

/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 **/
STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
        u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
        s32 ret_val;

        DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

        /* Gate automatic PHY configuration by hardware on managed and
         * non-managed 82579 and newer adapters.
         */
        e1000_gate_hw_phy_config_ich8lan(hw, true);

#ifdef ULP_SUPPORT
        /* It is not possible to be certain of the current state of ULP
         * so forcibly disable it.
         */
        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;

#endif /* ULP_SUPPORT */
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val) {
                DEBUGOUT("Failed to initialize PHY flow\n");
                goto out;
        }

        /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
         * inaccessible and resetting the PHY is not blocked, toggle the
         * LANPHYPC Value bit to force the interconnect to PCIe mode.
         */
        switch (hw->mac.type) {
        case e1000_pch_lpt:
        case e1000_pch_spt:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;

                /* Before toggling LANPHYPC, see if PHY is accessible by
                 * forcing MAC to SMBus mode first.
                 */
                mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
                E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

                /* Wait 50 milliseconds for MAC to finish any retries
                 * that it might be trying to perform from previous
                 * attempts to acknowledge any phy read requests.
                 */
                msec_delay(50);

                /* fall-through */
        case e1000_pch2lan:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;

                /* fall-through */
        case e1000_pchlan:
                if ((hw->mac.type == e1000_pchlan) &&
                    (fwsm & E1000_ICH_FWSM_FW_VALID))
                        break;

                if (hw->phy.ops.check_reset_block(hw)) {
                        DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
                        ret_val = -E1000_ERR_PHY;
                        break;
                }

                /* Toggle LANPHYPC Value bit */
                e1000_toggle_lanphypc_pch_lpt(hw);
                if (hw->mac.type >= e1000_pch_lpt) {
                        if (e1000_phy_is_accessible_pchlan(hw))
                                break;

                        /* Toggling LANPHYPC brings the PHY out of SMBus mode
                         * so ensure that the MAC is also out of SMBus mode
                         */
                        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
                        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

                        if (e1000_phy_is_accessible_pchlan(hw))
                                break;

                        ret_val = -E1000_ERR_PHY;
                }
                break;
        default:
                break;
        }

        hw->phy.ops.release(hw);
        if (!ret_val) {

                /* Check to see if able to reset PHY.  Print error if not */
                if (hw->phy.ops.check_reset_block(hw)) {
                        ERROR_REPORT("Reset blocked by ME\n");
                        goto out;
                }

                /* Reset the PHY before any access to it.  Doing so, ensures
                 * that the PHY is in a known good state before we read/write
                 * PHY registers.  The generic reset is sufficient here,
                 * because we haven't determined the PHY type yet.
                 */
                ret_val = e1000_phy_hw_reset_generic(hw);
                if (ret_val)
                        goto out;

                /* On a successful reset, possibly need to wait for the PHY
                 * to quiesce to an accessible state before returning control
                 * to the calling function.  If the PHY does not quiesce, then
                 * return E1000E_BLK_PHY_RESET, as this is the condition that
                 * the PHY is in.
                 */
                ret_val = hw->phy.ops.check_reset_block(hw);
                if (ret_val)
                        ERROR_REPORT("ME blocked access to PHY after reset\n");
        }

out:
        /* Ungate automatic PHY configuration on non-managed 82579 */
        if ((hw->mac.type == e1000_pch2lan) &&
            !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
                msec_delay(10);
                e1000_gate_hw_phy_config_ich8lan(hw, false);
        }

        return ret_val;
}

/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
        struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val;

        DEBUGFUNC("e1000_init_phy_params_pchlan");

        phy->addr               = 1;
        phy->reset_delay_us     = 100;

        phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
        phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
        phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
        phy->ops.set_page       = e1000_set_page_igp;
        phy->ops.read_reg       = e1000_read_phy_reg_hv;
        phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
        phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
        phy->ops.release        = e1000_release_swflag_ich8lan;
        phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
        phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
        phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
        phy->ops.write_reg      = e1000_write_phy_reg_hv;
        phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
        phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
        phy->ops.power_up       = e1000_power_up_phy_copper;
        phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
        phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;

        phy->id = e1000_phy_unknown;

        ret_val = e1000_init_phy_workarounds_pchlan(hw);
        if (ret_val)
                return ret_val;

        if (phy->id == e1000_phy_unknown)
                switch (hw->mac.type) {
                default:
                        ret_val = e1000_get_phy_id(hw);
                        if (ret_val)
                                return ret_val;
                        if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
                                break;
                        /* fall-through */
                case e1000_pch2lan:
                case e1000_pch_lpt:
                case e1000_pch_spt:
                        /* In case the PHY needs to be in mdio slow mode,
                         * set slow mode and try to get the PHY id again.
                         */
                        ret_val = e1000_set_mdio_slow_mode_hv(hw);
                        if (ret_val)
                                return ret_val;
                        ret_val = e1000_get_phy_id(hw);
                        if (ret_val)
                                return ret_val;
                        break;
                }
        phy->type = e1000_get_phy_type_from_id(phy->id);

        switch (phy->type) {
        case e1000_phy_82577:
        case e1000_phy_82579:
        case e1000_phy_i217:
                phy->ops.check_polarity = e1000_check_polarity_82577;
                phy->ops.force_speed_duplex =
                        e1000_phy_force_speed_duplex_82577;
                phy->ops.get_cable_length = e1000_get_cable_length_82577;
                phy->ops.get_info = e1000_get_phy_info_82577;
                phy->ops.commit = e1000_phy_sw_reset_generic;
                break;
        case e1000_phy_82578:
                phy->ops.check_polarity = e1000_check_polarity_m88;
                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
                phy->ops.get_cable_length = e1000_get_cable_length_m88;
                phy->ops.get_info = e1000_get_phy_info_m88;
                break;
        default:
                ret_val = -E1000_ERR_PHY;
                break;
        }

        return ret_val;
}

/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
        struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val;
        u16 i = 0;

        DEBUGFUNC("e1000_init_phy_params_ich8lan");

        phy->addr               = 1;
        phy->reset_delay_us     = 100;

        phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
        phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
        phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
        phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
        phy->ops.read_reg       = e1000_read_phy_reg_igp;
        phy->ops.release        = e1000_release_swflag_ich8lan;
        phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
        phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
        phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
        phy->ops.write_reg      = e1000_write_phy_reg_igp;
        phy->ops.power_up       = e1000_power_up_phy_copper;
        phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;

        /* We may need to do this twice - once for IGP and if that fails,
         * we'll set BM func pointers and try again
         */
        ret_val = e1000_determine_phy_address(hw);
        if (ret_val) {
                phy->ops.write_reg = e1000_write_phy_reg_bm;
                phy->ops.read_reg  = e1000_read_phy_reg_bm;
                ret_val = e1000_determine_phy_address(hw);
                if (ret_val) {
                        DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
                        return ret_val;
                }
        }

        phy->id = 0;
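        /* Poll for up to 100 ms, re-reading the PHY ID every millisecond
         * until it maps to a known PHY type.
         */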
        while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
               (i++ < 100)) {
                msec_delay(1);
                ret_val = e1000_get_phy_id(hw);
                if (ret_val)
                        return ret_val;
        }

        /* Verify phy id */
        switch (phy->id) {
        case IGP03E1000_E_PHY_ID:
                phy->type = e1000_phy_igp_3;
                phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
                phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
                phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
                phy->ops.get_info = e1000_get_phy_info_igp;
                phy->ops.check_polarity = e1000_check_polarity_igp;
                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
                break;
        case IFE_E_PHY_ID:
        case IFE_PLUS_E_PHY_ID:
        case IFE_C_E_PHY_ID:
                phy->type = e1000_phy_ife;
                phy->autoneg_mask = E1000_ALL_NOT_GIG;
                phy->ops.get_info = e1000_get_phy_info_ife;
                phy->ops.check_polarity = e1000_check_polarity_ife;
                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
                break;
        case BME1000_E_PHY_ID:
                phy->type = e1000_phy_bm;
                phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
                phy->ops.read_reg = e1000_read_phy_reg_bm;
                phy->ops.write_reg = e1000_write_phy_reg_bm;
                phy->ops.commit = e1000_phy_sw_reset_generic;
                phy->ops.get_info = e1000_get_phy_info_m88;
                phy->ops.check_polarity = e1000_check_polarity_m88;
                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
                break;
        default:
                return -E1000_ERR_PHY;
                break;
        }

        return E1000_SUCCESS;
}

/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.
 **/
STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
        struct e1000_nvm_info *nvm = &hw->nvm;
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        u32 gfpreg, sector_base_addr, sector_end_addr;
        u16 i;
        u32 nvm_size;

        DEBUGFUNC("e1000_init_nvm_params_ich8lan");

        nvm->type = e1000_nvm_flash_sw;

        if (hw->mac.type >= e1000_pch_spt) {
                /* in SPT, gfpreg doesn't exist. NVM size is taken from the
                 * STRAP register. This is because in SPT the GbE Flash region
                 * is no longer accessed through the flash registers. Instead,
                 * the mechanism has changed, and the Flash region access
                 * registers are now implemented in GbE memory space.
                 */
                nvm->flash_base_addr = 0;
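                /* STRAP bits 5:1 encode the flash size: total NVM size in
                 * bytes is (encoded value + 1) * NVM_SIZE_MULTIPLIER, and
                 * one bank is half of that.
                 */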
                nvm_size =
                    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
                    * NVM_SIZE_MULTIPLIER;
                nvm->flash_bank_size = nvm_size / 2;
                /* Adjust to word count */
                nvm->flash_bank_size /= sizeof(u16);
                /* Set the base address for flash register access */
                hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
        } else {
                /* Can't read flash registers if register set isn't mapped. */
                if (!hw->flash_address) {
                        DEBUGOUT("ERROR: Flash registers not mapped\n");
                        return -E1000_ERR_CONFIG;
                }

                gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);

                /* sector_X_addr is a "sector"-aligned address (4096 bytes)
                 * Add 1 to sector_end_addr since this sector is included in
                 * the overall size.
                 */
                sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
                sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

                /* flash_base_addr is byte-aligned */
                nvm->flash_base_addr = sector_base_addr
                                       << FLASH_SECTOR_ADDR_SHIFT;

                /* find total size of the NVM, then cut in half since the total
                 * size represents two separate NVM banks.
                 */
                nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
                                        << FLASH_SECTOR_ADDR_SHIFT);
                nvm->flash_bank_size /= 2;
                /* Adjust to word count */
                nvm->flash_bank_size /= sizeof(u16);
        }

        nvm->word_size = E1000_SHADOW_RAM_WORDS;

        /* Clear shadow ram */
        for (i = 0; i < nvm->word_size; i++) {
                dev_spec->shadow_ram[i].modified = false;
                dev_spec->shadow_ram[i].value    = 0xFFFF;
        }

        E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
        E1000_MUTEX_INIT(&dev_spec->swflag_mutex);

        /* Function Pointers */
        nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
        nvm->ops.release        = e1000_release_nvm_ich8lan;
        if (hw->mac.type >= e1000_pch_spt) {
                nvm->ops.read   = e1000_read_nvm_spt;
                nvm->ops.update = e1000_update_nvm_checksum_spt;
        } else {
                nvm->ops.read   = e1000_read_nvm_ich8lan;
                nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
        }
        nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
        nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
        nvm->ops.write          = e1000_write_nvm_ich8lan;

        return E1000_SUCCESS;
}

/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function
 *  pointers.
 **/
STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
        struct e1000_mac_info *mac = &hw->mac;
#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
        u16 pci_cfg;
#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */

        DEBUGFUNC("e1000_init_mac_params_ich8lan");

        /* Set media type function pointer */
        hw->phy.media_type = e1000_media_type_copper;

        /* Set mta register count */
        mac->mta_reg_count = 32;
        /* Set rar entry count */
        mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
        if (mac->type == e1000_ich8lan)
                mac->rar_entry_count--;
        /* Set if part includes ASF firmware */
        mac->asf_firmware_present = true;
        /* FWSM register */
        mac->has_fwsm = true;
        /* ARC subsystem not supported */
        mac->arc_subsystem_valid = false;
        /* Adaptive IFS supported */
        mac->adaptive_ifs = true;

        /* Function pointers */

        /* bus type/speed/width */
        mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
        /* function id */
        mac->ops.set_lan_id = e1000_set_lan_id_single_port;
        /* reset */
        mac->ops.reset_hw = e1000_reset_hw_ich8lan;
        /* hw initialization */
        mac->ops.init_hw = e1000_init_hw_ich8lan;
        /* link setup */
        mac->ops.setup_link = e1000_setup_link_ich8lan;
        /* physical interface setup */
        mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
        /* check for link */
        mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
        /* link info */
        mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
        /* multicast address update */
        mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
        /* clear hardware counters */
        mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;

        /* LED and other operations */
        switch (mac->type) {
        case e1000_ich8lan:
        case e1000_ich9lan:
        case e1000_ich10lan:
                /* check management mode */
                mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
                /* ID LED init */
                mac->ops.id_led_init = e1000_id_led_init_generic;
                /* blink LED */
                mac->ops.blink_led = e1000_blink_led_generic;
                /* setup LED */
                mac->ops.setup_led = e1000_setup_led_generic;
                /* cleanup LED */
                mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
                /* turn on/off LED */
                mac->ops.led_on = e1000_led_on_ich8lan;
                mac->ops.led_off = e1000_led_off_ich8lan;
                break;
        case e1000_pch2lan:
                mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
                mac->ops.rar_set = e1000_rar_set_pch2lan;
                /* fall-through */
        case e1000_pch_lpt:
        case e1000_pch_spt:
#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
                /* multicast address update for pch2 */
                mac->ops.update_mc_addr_list =
                        e1000_update_mc_addr_list_pch2lan;
                /* fall-through */
#endif
        case e1000_pchlan:
#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
                /* save PCH revision_id */
                e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
                /* SPT uses full byte for revision ID,
                 * as opposed to previous generations
                 */
                if (hw->mac.type >= e1000_pch_spt)
                        hw->revision_id = (u8)(pci_cfg &= 0x00FF);
                else
                        hw->revision_id = (u8)(pci_cfg &= 0x000F);
#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
                /* check management mode */
                mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
                /* ID LED init */
                mac->ops.id_led_init = e1000_id_led_init_pchlan;
                /* setup LED */
                mac->ops.setup_led = e1000_setup_led_pchlan;
                /* cleanup LED */
                mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
                /* turn on/off LED */
                mac->ops.led_on = e1000_led_on_pchlan;
                mac->ops.led_off = e1000_led_off_pchlan;
                break;
        default:
                break;
        }

        if (mac->type >= e1000_pch_lpt) {
                mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
                mac->ops.rar_set = e1000_rar_set_pch_lpt;
                mac->ops.setup_physical_interface =
                        e1000_setup_copper_link_pch_lpt;
        }

        /* Enable PCS Lock-loss workaround for ICH8 */
        if (mac->type == e1000_ich8lan)
                e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);

        return E1000_SUCCESS;
}

/**
 *  __e1000_access_emi_reg_locked - Read/write EMI register
 *  @hw: pointer to the HW structure
 *  @address: EMI address to program
 *  @data: pointer to value to read/write from/to the EMI address
 *  @read: boolean flag to indicate read or write
 *
 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
                                         u16 *data, bool read)
{
        s32 ret_val;

        DEBUGFUNC("__e1000_access_emi_reg_locked");

        ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
        if (ret_val)
                return ret_val;

        if (read)
                ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
                                                      data);
        else
                ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
                                                       *data);

        return ret_val;
}

/**
 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be read from the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
        DEBUGFUNC("e1000_read_emi_reg_locked");

        return __e1000_access_emi_reg_locked(hw, addr, data, true);
}

/**
 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be written to the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
        DEBUGFUNC("e1000_write_emi_reg_locked");

        return __e1000_access_emi_reg_locked(hw, addr, &data, false);
}

/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
 *
 *  EEE LPI must not be asserted earlier than one second after link is up.
 *  On 82579, EEE LPI should not be enabled until such time otherwise there
 *  can be link issues with some switches.  Other devices can have EEE LPI
 *  enabled immediately upon link up since they have a timer in hardware which
 *  prevents LPI from being asserted too early.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        s32 ret_val;
        u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

        DEBUGFUNC("e1000_set_eee_pchlan");

        switch (hw->phy.type) {
        case e1000_phy_82579:
                lpa = I82579_EEE_LP_ABILITY;
                pcs_status = I82579_EEE_PCS_STATUS;
                adv_addr = I82579_EEE_ADVERTISEMENT;
                break;
        case e1000_phy_i217:
                lpa = I217_EEE_LP_ABILITY;
                pcs_status = I217_EEE_PCS_STATUS;
                adv_addr = I217_EEE_ADVERTISEMENT;
                break;
        default:
                return E1000_SUCCESS;
        }

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                return ret_val;

        ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
        if (ret_val)
                goto release;

        /* Clear bits that enable EEE in various speeds */
        lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

        /* Enable EEE if not disabled by user */
        if (!dev_spec->eee_disable) {
                /* Save off link partner's EEE ability */
                ret_val = e1000_read_emi_reg_locked(hw, lpa,
                                                    &dev_spec->eee_lp_ability);
                if (ret_val)
                        goto release;

                /* Read EEE advertisement */
                ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
                if (ret_val)
                        goto release;

                /* Enable EEE only for speeds in which the link partner is
                 * EEE capable and for which we advertise EEE.
                 */
                if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
                        lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

                if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
                        hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
                        if (data & NWAY_LPAR_100TX_FD_CAPS)
                                lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
                        else
                                /* EEE is not supported in 100Half, so ignore
                                 * partner's EEE in 100 ability if full-duplex
                                 * is not advertised.
                                 */
                                dev_spec->eee_lp_ability &=
                                    ~I82579_EEE_100_SUPPORTED;
                }
        }

        if (hw->phy.type == e1000_phy_82579) {
                ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
                                                    &data);
                if (ret_val)
                        goto release;

                data &= ~I82579_LPI_100_PLL_SHUT;
                ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
                                                     data);
        }

        /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
        ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
        if (ret_val)
                goto release;

        ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
        hw->phy.ops.release(hw);

        return ret_val;
}

/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 **/
STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
        u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
        u32 status = E1000_READ_REG(hw, E1000_STATUS);
        s32 ret_val = E1000_SUCCESS;
        u16 reg;

        if (link && (status & E1000_STATUS_SPEED_1000)) {
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
                        return ret_val;

                ret_val =
                    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
                                               &reg);
                if (ret_val)
                        goto release;

                ret_val =
                    e1000_write_kmrn_reg_locked(hw,
                                                E1000_KMRNCTRLSTA_K1_CONFIG,
                                                reg &
                                                ~E1000_KMRNCTRLSTA_K1_ENABLE);
                if (ret_val)
                        goto release;

                usec_delay(10);

                E1000_WRITE_REG(hw, E1000_FEXTNVM6,
                                fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

                ret_val =
                    e1000_write_kmrn_reg_locked(hw,
                                                E1000_KMRNCTRLSTA_K1_CONFIG,
                                                reg);
release:
                hw->phy.ops.release(hw);
        } else {
                /* clear FEXTNVM6 bit 8 on link down or 10/100 */
                fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

                if ((hw->phy.revision > 5) || !link ||
                    ((status & E1000_STATUS_SPEED_100) &&
                     (status & E1000_STATUS_FD)))
                        goto update_fextnvm6;

                ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
                if (ret_val)
                        return ret_val;

                /* Clear link status transmit timeout */
                reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

                if (status & E1000_STATUS_SPEED_100) {
                        /* Set inband Tx timeout to 5x10us for 100Half */
                        reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

                        /* Do not extend the K1 entry latency for 100Half */
                        fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
                } else {
                        /* Set inband Tx timeout to 50x10us for 10Full/Half */
                        reg |= 50 <<
                               I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

                        /* Extend the K1 entry latency for 10 Mbps */
                        fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
                }

                ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
                if (ret_val)
                        return ret_val;

update_fextnvm6:
                E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
        }

        return ret_val;
}

#ifdef ULP_SUPPORT
/**
 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @to_sx: boolean indicating a system power state transition to Sx
 *
 *  When link is down, configure ULP mode to significantly reduce the power
 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
 *  ME firmware to start the ULP configuration.  If not on an ME enabled
 *  system, configure the ULP mode by software.
 **/
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
        u32 mac_reg;
        s32 ret_val = E1000_SUCCESS;
        u16 phy_reg;
        u16 oem_reg = 0;

        if ((hw->mac.type < e1000_pch_lpt) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
            (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
            (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
            (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
                return 0;

        if (!to_sx) {
                int i = 0;
                /* Poll up to 5 seconds for Cable Disconnected indication */
                while (!(E1000_READ_REG(hw, E1000_FEXT) &
                         E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
                        /* Bail if link is re-acquired */
                        if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
                                return -E1000_ERR_PHY;
                        if (i++ == 100)
                                break;

                        msec_delay(50);
                }
                DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
                          (E1000_READ_REG(hw, E1000_FEXT) &
                           E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
                          i * 50);
                if (!(E1000_READ_REG(hw, E1000_FEXT) &
                    E1000_FEXT_PHY_CABLE_DISCONNECTED))
                        return 0;
        }

        if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
                /* Request ME configure ULP mode in the PHY */
                mac_reg = E1000_READ_REG(hw, E1000_H2ME);
                mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
                E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);

                goto out;
        }

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                goto out;

        /* During S0 Idle keep the phy in PCI-E mode */
        if (hw->dev_spec.ich8lan.smbus_disable)
                goto skip_smbus;

        /* Force SMBus mode in PHY */
        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
        if (ret_val)
                goto release;
        phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
        e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

        /* Force SMBus mode in MAC */
        mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
        mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

        /* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
1180          * LPLU and disable Gig speed when entering ULP
1181          */
1182         if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1183                 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1184                                                        &oem_reg);
1185                 if (ret_val)
1186                         goto release;
1187
1188                 phy_reg = oem_reg;
1189                 phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1190
1191                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1192                                                         phy_reg);
1193
1194                 if (ret_val)
1195                         goto release;
1196         }
1197
1198 skip_smbus:
1199         if (!to_sx) {
1200                 /* Change the 'Link Status Change' interrupt to trigger
1201                  * on 'Cable Status Change'
1202                  */
1203                 ret_val = e1000_read_kmrn_reg_locked(hw,
1204                                                      E1000_KMRNCTRLSTA_OP_MODES,
1205                                                      &phy_reg);
1206                 if (ret_val)
1207                         goto release;
1208                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1209                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1210                                             phy_reg);
1211         }
1212
1213         /* Set Inband ULP Exit, Reset to SMBus mode and
1214          * Disable SMBus Release on PERST# in PHY
1215          */
1216         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1217         if (ret_val)
1218                 goto release;
1219         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1220                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1221         if (to_sx) {
1222                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1223                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1224                 else
1225                         phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1226
1227                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1228                 phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1229         } else {
1230                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1231                 phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1232                 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1233         }
1234         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1235
1236         /* Set Disable SMBus Release on PERST# in MAC */
1237         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1238         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1239         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1240
1241         /* Commit ULP changes in PHY by starting auto ULP configuration */
1242         phy_reg |= I218_ULP_CONFIG1_START;
1243         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1244
1245         if (!to_sx) {
1246                 /* Disable Tx so that the MAC doesn't send any (buffered)
1247                  * packets to the PHY.
1248                  */
1249                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1250                 mac_reg &= ~E1000_TCTL_EN;
1251                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1252         }
1253
1254         if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1255             to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1256                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1257                                                         oem_reg);
1258                 if (ret_val)
1259                         goto release;
1260         }
1261
1262 release:
1263         hw->phy.ops.release(hw);
1264 out:
1265         if (ret_val)
1266                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1267         else
1268                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1269
1270         return ret_val;
1271 }
1272
1273 /**
1274  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1275  *  @hw: pointer to the HW structure
1276  *  @force: boolean indicating whether or not to force disabling ULP
1277  *
1278  *  Un-configure ULP mode when link is up, the system is transitioned from
1279  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1280  *  system, poll for an indication from ME that ULP has been un-configured.
1281  *  If not on an ME enabled system, un-configure the ULP mode by software.
1282  *
1283  *  During nominal operation, this function is called when link is acquired
1284  *  to disable ULP mode (force=false); otherwise, for example when unloading
1285  *  the driver or during Sx->S0 transitions, this is called with force=true
1286  *  to forcibly disable ULP.
1287  *
1288  *  When the cable is plugged in while the device is in D0, a Cable Status
1289  *  Change interrupt is generated which causes this function to be called
1290  *  to partially disable ULP mode and restart autonegotiation.  This function
1291  *  is then called again due to the resulting Link Status Change interrupt
1292  *  to finish cleaning up after the ULP flow.
1293  */
1294 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1295 {
1296         s32 ret_val = E1000_SUCCESS;
1297         u32 mac_reg;
1298         u16 phy_reg;
1299         int i = 0;
1300
1301         if ((hw->mac.type < e1000_pch_lpt) ||
1302             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1303             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1304             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1305             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1306             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1307                 return 0;
1308
1309         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1310                 if (force) {
1311                         /* Request ME un-configure ULP mode in the PHY */
1312                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1313                         mac_reg &= ~E1000_H2ME_ULP;
1314                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1315                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1316                 }
1317
1318                 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1319                 while (E1000_READ_REG(hw, E1000_FWSM) &
1320                        E1000_FWSM_ULP_CFG_DONE) {
1321                         if (i++ == 30) {
1322                                 ret_val = -E1000_ERR_PHY;
1323                                 goto out;
1324                         }
1325
1326                         msec_delay(10);
1327                 }
1328                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1329
1330                 if (force) {
1331                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1332                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1333                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1334                 } else {
1335                         /* Clear H2ME.ULP after ME ULP configuration */
1336                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1337                         mac_reg &= ~E1000_H2ME_ULP;
1338                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1339
1340                         /* Restore link speed advertisements and restart
1341                          * Auto-negotiation
1342                          */
1343                         if (hw->mac.autoneg) {
1344                                 ret_val = e1000_phy_setup_autoneg(hw);
1345                                 if (ret_val)
1346                                         goto out;
1347                         } else {
1348                                 ret_val = e1000_setup_copper_link_generic(hw);
1349                                 if (ret_val)
1350                                         goto out;
1351                         }
1352                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1353                 }
1354
1355                 goto out;
1356         }
1357
1358         ret_val = hw->phy.ops.acquire(hw);
1359         if (ret_val)
1360                 goto out;
1361
1362         /* Revert the 'Link Status Change' interrupt so it no longer
1363          * triggers on 'Cable Status Change'
1364          */
1365         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1366                                              &phy_reg);
1367         if (ret_val)
1368                 goto release;
1369         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1370         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1371
1372         if (force)
1373                 /* Toggle LANPHYPC Value bit */
1374                 e1000_toggle_lanphypc_pch_lpt(hw);
1375
1376         /* Unforce SMBus mode in PHY */
1377         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1378         if (ret_val) {
1379                 /* The MAC might be in PCIe mode, so temporarily force to
1380                  * SMBus mode in order to access the PHY.
1381                  */
1382                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1383                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1384                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1385
1386                 msec_delay(50);
1387
1388                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1389                                                        &phy_reg);
1390                 if (ret_val)
1391                         goto release;
1392         }
1393         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1394         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1395
1396         /* Unforce SMBus mode in MAC */
1397         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1398         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1399         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1400
1401         /* When ULP mode was previously entered, K1 was disabled by the
1402          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1403          */
1404         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1405         if (ret_val)
1406                 goto release;
1407         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1408         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1409
1410         /* Clear ULP enabled configuration */
1411         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1412         if (ret_val)
1413                 goto release;
1414         /* CSC interrupt received due to ULP Indication, or forced disable */
1415         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1416                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1417                              I218_ULP_CONFIG1_STICKY_ULP |
1418                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1419                              I218_ULP_CONFIG1_WOL_HOST |
1420                              I218_ULP_CONFIG1_INBAND_EXIT |
1421                              I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1422                              I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1423                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1424                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1425
1426                 /* Commit ULP changes by starting auto ULP configuration */
1427                 phy_reg |= I218_ULP_CONFIG1_START;
1428                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1429
1430                 /* Clear Disable SMBus Release on PERST# in MAC */
1431                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1432                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1433                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1434
1435                 if (!force) {
1436                         hw->phy.ops.release(hw);
1437
1438                         if (hw->mac.autoneg)
1439                                 e1000_phy_setup_autoneg(hw);
1440                         else
1441                                 e1000_setup_copper_link_generic(hw);
1442
1443                         e1000_sw_lcd_config_ich8lan(hw);
1444
1445                         e1000_oem_bits_config_ich8lan(hw, true);
1446
1447                         /* Set ULP state to unknown and return non-zero to
1448                          * indicate no link (yet); re-enter this function on
1449                          * the next LSC interrupt to finish disabling ULP.
1450                          */
1451                         hw->dev_spec.ich8lan.ulp_state =
1452                             e1000_ulp_state_unknown;
1453
1454                         return 1;
1455                 }
1456         }
1457
1458         /* Re-enable Tx */
1459         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1460         mac_reg |= E1000_TCTL_EN;
1461         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1462
1463 release:
1464         hw->phy.ops.release(hw);
1465         if (force) {
1466                 hw->phy.ops.reset(hw);
1467                 msec_delay(50);
1468         }
1469 out:
1470         if (ret_val)
1471                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1472         else
1473                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1474
1475         return ret_val;
1476 }
1477
1478 #endif /* ULP_SUPPORT */
1479
1480
1481 /**
1482  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1483  *  @hw: pointer to the HW structure
1484  *
1485  *  Checks to see if the link status of the hardware has changed.  If a
1486  *  change in link status has been detected, then we read the PHY registers
1487  *  to get the current speed/duplex if link exists.
1488  **/
1489 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1490 {
1491         struct e1000_mac_info *mac = &hw->mac;
1492         s32 ret_val, tipg_reg = 0;
1493         u16 emi_addr, emi_val = 0;
1494         bool link = false;
1495         u16 phy_reg;
1496
1497         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1498
1499         /* We only want to go out to the PHY registers to see if Auto-Neg
1500          * has completed and/or if our link status has changed.  The
1501          * get_link_status flag is set upon receiving a Link Status
1502          * Change or Rx Sequence Error interrupt.
1503          */
1504         if (!mac->get_link_status)
1505                 return E1000_SUCCESS;
1506
1507         if ((hw->mac.type < e1000_pch_lpt) ||
1508             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1509             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1510                 /* First we want to see if the MII Status Register reports
1511                  * link.  If so, then we want to get the current speed/duplex
1512                  * of the PHY.
1513                  */
1514                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1515                 if (ret_val)
1516                         return ret_val;
1517         } else {
1518                 /* Check the MAC's STATUS register to determine link state
1519                  * since the PHY could be inaccessible while in ULP mode.
1520                  */
1521                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1522                 if (link)
1523                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1524                 else
1525                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1526                 if (ret_val)
1527                         return ret_val;
1528         }
1529
1530         if (hw->mac.type == e1000_pchlan) {
1531                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1532                 if (ret_val)
1533                         return ret_val;
1534         }
1535
1536         /* When connected at 10Mbps half-duplex, some parts are excessively
1537          * aggressive resulting in many collisions. To avoid this, increase
1538          * the IPG and reduce Rx latency in the PHY.
1539          */
1540         if ((hw->mac.type >= e1000_pch2lan) && link) {
1541                 u16 speed, duplex;
1542
1543                 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1544                 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1545                 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1546
1547                 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1548                         tipg_reg |= 0xFF;
1549                         /* Reduce Rx latency in analog PHY */
1550                         emi_val = 0;
1551                 } else if (hw->mac.type >= e1000_pch_spt &&
1552                            duplex == FULL_DUPLEX && speed != SPEED_1000) {
1553                         tipg_reg |= 0xC;
1554                         emi_val = 1;
1555                 } else {
1556                         /* Roll back the default values */
1557                         tipg_reg |= 0x08;
1558                         emi_val = 1;
1559                 }
1560
1561                 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1562
1563                 ret_val = hw->phy.ops.acquire(hw);
1564                 if (ret_val)
1565                         return ret_val;
1566
1567                 if (hw->mac.type == e1000_pch2lan)
1568                         emi_addr = I82579_RX_CONFIG;
1569                 else
1570                         emi_addr = I217_RX_CONFIG;
1571                 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1572
1574                 if (hw->mac.type >= e1000_pch_lpt) {
1575                         u16 phy_reg;
1576
1577                         hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1578                                                     &phy_reg);
1579                         phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1580                         if (speed == SPEED_100 || speed == SPEED_10)
1581                                 phy_reg |= 0x3E8;
1582                         else
1583                                 phy_reg |= 0xFA;
1584                         hw->phy.ops.write_reg_locked(hw,
1585                                                      I217_PLL_CLOCK_GATE_REG,
1586                                                      phy_reg);
1587
1588                         if (speed == SPEED_1000) {
1589                                 hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1590                                                             &phy_reg);
1591
1592                                 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1593
1594                                 hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1595                                                              phy_reg);
1596                         }
1597                 }
1598                 hw->phy.ops.release(hw);
1599
1600                 if (ret_val)
1601                         return ret_val;
1602
1603                 if (hw->mac.type >= e1000_pch_spt) {
1604                         u16 data;
1605                         u16 ptr_gap;
1606
1607                         if (speed == SPEED_1000) {
1608                                 ret_val = hw->phy.ops.acquire(hw);
1609                                 if (ret_val)
1610                                         return ret_val;
1611
1612                                 ret_val = hw->phy.ops.read_reg_locked(hw,
1613                                                               PHY_REG(776, 20),
1614                                                               &data);
1615                                 if (ret_val) {
1616                                         hw->phy.ops.release(hw);
1617                                         return ret_val;
1618                                 }
1619
1620                                 ptr_gap = (data & (0x3FF << 2)) >> 2;
1621                                 if (ptr_gap < 0x18) {
1622                                         data &= ~(0x3FF << 2);
1623                                         data |= (0x18 << 2);
1624                                         ret_val =
1625                                                 hw->phy.ops.write_reg_locked(hw,
1626                                                         PHY_REG(776, 20), data);
1627                                 }
1628                                 hw->phy.ops.release(hw);
1629                                 if (ret_val)
1630                                         return ret_val;
1631                         } else {
1632                                 ret_val = hw->phy.ops.acquire(hw);
1633                                 if (ret_val)
1634                                         return ret_val;
1635
1636                                 ret_val = hw->phy.ops.write_reg_locked(hw,
1637                                                              PHY_REG(776, 20),
1638                                                              0xC023);
1639                                 hw->phy.ops.release(hw);
1640                                 if (ret_val)
1641                                         return ret_val;
1642
1643                         }
1644                 }
1645         }
1646
1647         /* I217 Packet Loss issue:
1648          * ensure that FEXTNVM4 Beacon Duration is set correctly
1649          * on power up.
1650          * Set the Beacon Duration for I217 to 8 usec
1651          */
1652         if (hw->mac.type >= e1000_pch_lpt) {
1653                 u32 mac_reg;
1654
1655                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1656                 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1657                 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1658                 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1659         }
1660
1661         /* Work-around I218 hang issue */
1662         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1663             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1664             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1665             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1666                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1667                 if (ret_val)
1668                         return ret_val;
1669         }
1670         /* Clear link partner's EEE ability */
1671         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1672
1673         /* Configure K0s minimum time */
1674         if (hw->mac.type >= e1000_pch_lpt) {
1675                 e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
1676         }
1677
1678         if (hw->mac.type >= e1000_pch_lpt) {
1679                 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1680
1681                 if (hw->mac.type == e1000_pch_spt) {
1682                         /* FEXTNVM6 K1-off workaround - for SPT only */
1683                         u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1684
1685                         if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1686                                 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1687                         else
1688                                 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1689                 }
1690
1691                 if (hw->dev_spec.ich8lan.disable_k1_off)
1692                         fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1693
1694                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1695         }
1696
1697         if (!link)
1698                 return E1000_SUCCESS; /* No link detected */
1699
1700         mac->get_link_status = false;
1701
1702         switch (hw->mac.type) {
1703         case e1000_pch2lan:
1704                 ret_val = e1000_k1_workaround_lv(hw);
1705                 if (ret_val)
1706                         return ret_val;
1707                 /* fall-thru */
1708         case e1000_pchlan:
1709                 if (hw->phy.type == e1000_phy_82578) {
1710                         ret_val = e1000_link_stall_workaround_hv(hw);
1711                         if (ret_val)
1712                                 return ret_val;
1713                 }
1714
1715                 /* Workaround for PCHx parts in half-duplex:
1716                  * Set the number of preambles removed from the packet
1717                  * when it is passed from the PHY to the MAC to prevent
1718                  * the MAC from misinterpreting the packet type.
1719                  */
1720                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1721                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1722
1723                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1724                     E1000_STATUS_FD)
1725                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1726
1727                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1728                 break;
1729         default:
1730                 break;
1731         }
1732
1733         /* Check if there was DownShift, must be checked
1734          * immediately after link-up
1735          */
1736         e1000_check_downshift_generic(hw);
1737
1738         /* Enable/Disable EEE after link up */
1739         if (hw->phy.type > e1000_phy_82579) {
1740                 ret_val = e1000_set_eee_pchlan(hw);
1741                 if (ret_val)
1742                         return ret_val;
1743         }
1744
1745         /* If we are forcing speed/duplex, then we simply return since
1746          * we have already determined whether we have link or not.
1747          */
1748         if (!mac->autoneg)
1749                 return -E1000_ERR_CONFIG;
1750
1751         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1752          * of MAC speed/duplex configuration.  So we only need to
1753          * configure Collision Distance in the MAC.
1754          */
1755         mac->ops.config_collision_dist(hw);
1756
1757         /* Configure Flow Control now that Auto-Neg has completed.
1758          * First, we need to restore the desired flow control
1759          * settings because we may have had to re-autoneg with a
1760          * different link partner.
1761          */
1762         ret_val = e1000_config_fc_after_link_up_generic(hw);
1763         if (ret_val)
1764                 DEBUGOUT("Error configuring flow control\n");
1765
1766         return ret_val;
1767 }
1768
1769 /**
1770  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1771  *  @hw: pointer to the HW structure
1772  *
1773  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1774  **/
1775 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1776 {
1777         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1778
1779         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1780         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1781         switch (hw->mac.type) {
1782         case e1000_ich8lan:
1783         case e1000_ich9lan:
1784         case e1000_ich10lan:
1785                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1786                 break;
1787         case e1000_pchlan:
1788         case e1000_pch2lan:
1789         case e1000_pch_lpt:
1790         case e1000_pch_spt:
1791                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1792                 break;
1793         default:
1794                 break;
1795         }
1796 }
1797
1798 /**
1799  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1800  *  @hw: pointer to the HW structure
1801  *
1802  *  Acquires the mutex for performing NVM operations.
1803  **/
1804 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1805 {
1806         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1807
1808         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1809
1810         return E1000_SUCCESS;
1811 }
1812
1813 /**
1814  *  e1000_release_nvm_ich8lan - Release NVM mutex
1815  *  @hw: pointer to the HW structure
1816  *
1817  *  Releases the mutex used while performing NVM operations.
1818  **/
1819 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1820 {
1821         DEBUGFUNC("e1000_release_nvm_ich8lan");
1822
1823         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1824
1825         return;
1826 }
1827
1828 /**
1829  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1830  *  @hw: pointer to the HW structure
1831  *
1832  *  Acquires the software control flag for performing PHY and select
1833  *  MAC CSR accesses.
1834  **/
1835 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1836 {
1837         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1838         s32 ret_val = E1000_SUCCESS;
1839
1840         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1841
1842         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1843
1844         while (timeout) {
1845                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1846                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1847                         break;
1848
1849                 msec_delay_irq(1);
1850                 timeout--;
1851         }
1852
1853         if (!timeout) {
1854                 DEBUGOUT("SW has already locked the resource.\n");
1855                 ret_val = -E1000_ERR_CONFIG;
1856                 goto out;
1857         }
1858
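             /* The flag is free; set it and wait for the hardware to report
              * the bit as set before claiming ownership.
              */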
1859         timeout = SW_FLAG_TIMEOUT;
1860
1861         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1862         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1863
1864         while (timeout) {
1865                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1866                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1867                         break;
1868
1869                 msec_delay_irq(1);
1870                 timeout--;
1871         }
1872
1873         if (!timeout) {
1874                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1875                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1876                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1877                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1878                 ret_val = -E1000_ERR_CONFIG;
1879                 goto out;
1880         }
1881
1882 out:
1883         if (ret_val)
1884                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1885
1886         return ret_val;
1887 }
1888
1889 /**
1890  *  e1000_release_swflag_ich8lan - Release software control flag
1891  *  @hw: pointer to the HW structure
1892  *
1893  *  Releases the software control flag for performing PHY and select
1894  *  MAC CSR accesses.
1895  **/
1896 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1897 {
1898         u32 extcnf_ctrl;
1899
1900         DEBUGFUNC("e1000_release_swflag_ich8lan");
1901
1902         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1903
1904         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1905                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1906                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1907         } else {
1908                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1909         }
1910
1911         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1912
1913         return;
1914 }
1915
1916 /**
1917  *  e1000_check_mng_mode_ich8lan - Checks management mode
1918  *  @hw: pointer to the HW structure
1919  *
1920  *  This checks if the adapter has any manageability enabled.
1921  *  This is a function pointer entry point only called by read/write
1922  *  routines for the PHY and NVM parts.
1923  **/
1924 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1925 {
1926         u32 fwsm;
1927
1928         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1929
1930         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1931
1932         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1933                ((fwsm & E1000_FWSM_MODE_MASK) ==
1934                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1935 }
1936
1937 /**
1938  *  e1000_check_mng_mode_pchlan - Checks management mode
1939  *  @hw: pointer to the HW structure
1940  *
1941  *  This checks if the adapter has iAMT enabled.
1942  *  This is a function pointer entry point only called by read/write
1943  *  routines for the PHY and NVM parts.
1944  **/
1945 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1946 {
1947         u32 fwsm;
1948
1949         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1950
1951         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1952
1953         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1954                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1955 }
1956
1957 /**
1958  *  e1000_rar_set_pch2lan - Set receive address register
1959  *  @hw: pointer to the HW structure
1960  *  @addr: pointer to the receive address
1961  *  @index: receive address array register
1962  *
1963  *  Sets the receive address array register at index to the address passed
1964  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1965  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1966  *  contain the MAC address, but RAR[1-6] are reserved for manageability (ME).
1967  **/
1968 STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1969 {
1970         u32 rar_low, rar_high;
1971
1972         DEBUGFUNC("e1000_rar_set_pch2lan");
1973
1974         /* HW expects these in little endian so we reverse the byte order
1975          * from network order (big endian) to little endian
1976          */
1977         rar_low = ((u32) addr[0] |
1978                    ((u32) addr[1] << 8) |
1979                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1980
1981         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1982
1983         /* If MAC address zero, no need to set the AV bit */
1984         if (rar_low || rar_high)
1985                 rar_high |= E1000_RAH_AV;
1986
1987         if (index == 0) {
1988                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1989                 E1000_WRITE_FLUSH(hw);
1990                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1991                 E1000_WRITE_FLUSH(hw);
1992                 return E1000_SUCCESS;
1993         }
1994
1995         /* RAR[1-6] are owned by manageability.  Skip those and program the
1996          * next address into the SHRA register array.
1997          */
1998         if (index < (u32) (hw->mac.rar_entry_count)) {
1999                 s32 ret_val;
2000
2001                 ret_val = e1000_acquire_swflag_ich8lan(hw);
2002                 if (ret_val)
2003                         goto out;
2004
2005                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2006                 E1000_WRITE_FLUSH(hw);
2007                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2008                 E1000_WRITE_FLUSH(hw);
2009
2010                 e1000_release_swflag_ich8lan(hw);
2011
2012                 /* verify the register updates */
2013                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2014                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2015                         return E1000_SUCCESS;
2016
2017                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2018                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2019         }
2020
2021 out:
2022         DEBUGOUT1("Failed to write receive address at index %d\n", index);
2023         return -E1000_ERR_CONFIG;
2024 }
2025
2026 /**
2027  *  e1000_rar_set_pch_lpt - Set receive address registers
2028  *  @hw: pointer to the HW structure
2029  *  @addr: pointer to the receive address
2030  *  @index: receive address array register
2031  *
2032  *  Sets the receive address register array at index to the address passed
2033  *  in by addr. For LPT, RAR[0] is the base address register that is to
2034  *  contain the MAC address. SHRA[0-10] are receive address registers
2035  *  shared between the host and the Manageability Engine (ME).
2036  **/
2037 STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2038 {
2039         u32 rar_low, rar_high;
2040         u32 wlock_mac;
2041
2042         DEBUGFUNC("e1000_rar_set_pch_lpt");
2043
2044         /* HW expects these in little endian so we reverse the byte order
2045          * from network order (big endian) to little endian
2046          */
2047         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2048                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2049
2050         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2051
2052         /* If MAC address zero, no need to set the AV bit */
2053         if (rar_low || rar_high)
2054                 rar_high |= E1000_RAH_AV;
2055
2056         if (index == 0) {
2057                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2058                 E1000_WRITE_FLUSH(hw);
2059                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2060                 E1000_WRITE_FLUSH(hw);
2061                 return E1000_SUCCESS;
2062         }
2063
2064         /* The manageability engine (ME) can lock certain SHRAR registers that
2065          * it is using - those registers are unavailable for use.
2066          */
2067         if (index < hw->mac.rar_entry_count) {
2068                 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2069                             E1000_FWSM_WLOCK_MAC_MASK;
2070                 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2071
2072                 /* Check if all SHRAR registers are locked */
2073                 if (wlock_mac == 1)
2074                         goto out;
2075
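                     /* wlock_mac == 0 means no SHRA registers are locked;
                      * 1 (handled above) means they are all locked; larger
                      * values let the host write only SHRA[0..wlock_mac-1],
                      * hence the index <= wlock_mac check below.
                      */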
2076                 if ((wlock_mac == 0) || (index <= wlock_mac)) {
2077                         s32 ret_val;
2078
2079                         ret_val = e1000_acquire_swflag_ich8lan(hw);
2080
2081                         if (ret_val)
2082                                 goto out;
2083
2084                         E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2085                                         rar_low);
2086                         E1000_WRITE_FLUSH(hw);
2087                         E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2088                                         rar_high);
2089                         E1000_WRITE_FLUSH(hw);
2090
2091                         e1000_release_swflag_ich8lan(hw);
2092
2093                         /* verify the register updates */
2094                         if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2095                             (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2096                                 return E1000_SUCCESS;
2097                 }
2098         }
2099
2100 out:
2101         DEBUGOUT1("Failed to write receive address at index %d\n", index);
2102         return -E1000_ERR_CONFIG;
2103 }
2104
2105 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
2106 /**
2107  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2108  *  @hw: pointer to the HW structure
2109  *  @mc_addr_list: array of multicast addresses to program
2110  *  @mc_addr_count: number of multicast addresses to program
2111  *
2112  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2113  *  The caller must have a packed mc_addr_list of multicast addresses.
2114  **/
2115 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2116                                               u8 *mc_addr_list,
2117                                               u32 mc_addr_count)
2118 {
2119         u16 phy_reg = 0;
2120         int i;
2121         s32 ret_val;
2122
2123         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2124
2125         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2126
2127         ret_val = hw->phy.ops.acquire(hw);
2128         if (ret_val)
2129                 return;
2130
2131         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2132         if (ret_val)
2133                 goto release;
2134
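             /* Mirror each 32-bit MTA shadow entry into the PHY as two
              * consecutive 16-bit BM_MTA registers: low word, then high word.
              */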
2135         for (i = 0; i < hw->mac.mta_reg_count; i++) {
2136                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2137                                            (u16)(hw->mac.mta_shadow[i] &
2138                                                  0xFFFF));
2139                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2140                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
2141                                                  0xFFFF));
2142         }
2143
2144         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2145
2146 release:
2147         hw->phy.ops.release(hw);
2148 }
2149
2150 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
2151 /**
2152  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2153  *  @hw: pointer to the HW structure
2154  *
2155  *  Checks if firmware is blocking the reset of the PHY.
2156  *  This is a function pointer entry point only called by
2157  *  reset routines.
2158  **/
2159 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2160 {
2161         u32 fwsm;
2162         bool blocked = false;
2163         int i = 0;
2164
2165         DEBUGFUNC("e1000_check_reset_block_ich8lan");
2166
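             /* Poll for up to ~300 msec (30 iterations x 10 msec) for
              * firmware to indicate a PHY reset is no longer blocked.
              */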
2167         do {
2168                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2169                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2170                         blocked = true;
2171                         msec_delay(10);
2172                         continue;
2173                 }
2174                 blocked = false;
2175         } while (blocked && (i++ < 30));
2176         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2177 }
2178
2179 /**
2180  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2181  *  @hw: pointer to the HW structure
2182  *
2183  *  Assumes semaphore already acquired.
2185  **/
2186 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2187 {
2188         u16 phy_data;
2189         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2190         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2191                 E1000_STRAP_SMT_FREQ_SHIFT;
2192         s32 ret_val;
2193
2194         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2195
2196         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2197         if (ret_val)
2198                 return ret_val;
2199
2200         phy_data &= ~HV_SMB_ADDR_MASK;
2201         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2202         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2203
2204         if (hw->phy.type == e1000_phy_i217) {
2205                 /* Restore SMBus frequency */
2206                 if (freq--) {
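                             /* A strap value of 0 means no supported SMBus
                              * frequency; otherwise (freq - 1) is a 2-bit
                              * value split across the low/high frequency
                              * bits of HV_SMB_ADDR.
                              */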
2207                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2208                         phy_data |= (freq & (1 << 0)) <<
2209                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2210                         phy_data |= (freq & (1 << 1)) <<
2211                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2212                 } else {
2213                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
2214                 }
2215         }
2216
2217         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2218 }
2219
2220 /**
2221  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2222  *  @hw:   pointer to the HW structure
2223  *
2224  *  SW should configure the LCD from the NVM extended configuration region
2225  *  as a workaround for certain parts.
2226  **/
2227 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2228 {
2229         struct e1000_phy_info *phy = &hw->phy;
2230         u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2231         s32 ret_val = E1000_SUCCESS;
2232         u16 word_addr, reg_data, reg_addr, phy_page = 0;
2233
2234         DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2235
2236         /* Initialize the PHY from the NVM on ICH platforms.  This
2237          * is needed due to an issue where the NVM configuration is
2238          * not properly autoloaded after power transitions.
2239          * Therefore, after each PHY reset, we will load the
2240          * configuration data out of the NVM manually.
2241          */
2242         switch (hw->mac.type) {
2243         case e1000_ich8lan:
2244                 if (phy->type != e1000_phy_igp_3)
2245                         return ret_val;
2246
2247                 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2248                     (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2249                         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2250                         break;
2251                 }
2252                 /* Fall-thru */
2253         case e1000_pchlan:
2254         case e1000_pch2lan:
2255         case e1000_pch_lpt:
2256         case e1000_pch_spt:
2257                 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2258                 break;
2259         default:
2260                 return ret_val;
2261         }
2262
2263         ret_val = hw->phy.ops.acquire(hw);
2264         if (ret_val)
2265                 return ret_val;
2266
2267         data = E1000_READ_REG(hw, E1000_FEXTNVM);
2268         if (!(data & sw_cfg_mask))
2269                 goto release;
2270
2271         /* Make sure HW does not configure LCD from PHY
2272          * extended configuration before SW configuration
2273          */
2274         data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2275         if ((hw->mac.type < e1000_pch2lan) &&
2276             (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2277                 goto release;
2278
2279         cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2280         cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2281         cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2282         if (!cnf_size)
2283                 goto release;
2284
2285         cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2286         cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2287
2288         if (((hw->mac.type == e1000_pchlan) &&
2289              !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2290             (hw->mac.type > e1000_pchlan)) {
2291                 /* HW configures the SMBus address and LEDs when the
2292                  * OEM and LCD Write Enable bits are set in the NVM.
2293                  * When both NVM bits are cleared, SW will configure
2294                  * them instead.
2295                  */
2296                 ret_val = e1000_write_smbus_addr(hw);
2297                 if (ret_val)
2298                         goto release;
2299
2300                 data = E1000_READ_REG(hw, E1000_LEDCTL);
2301                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2302                                                         (u16)data);
2303                 if (ret_val)
2304                         goto release;
2305         }
2306
2307         /* Configure LCD from extended configuration region. */
2308
2309         /* cnf_base_addr is in DWORD */
2310         word_addr = (u16)(cnf_base_addr << 1);
2311
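             /* Each extended configuration entry in NVM is a word pair:
              * the register data followed by the register address.
              */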
2312         for (i = 0; i < cnf_size; i++) {
2313                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2314                                            &reg_data);
2315                 if (ret_val)
2316                         goto release;
2317
2318                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2319                                            1, &reg_addr);
2320                 if (ret_val)
2321                         goto release;
2322
2323                 /* Save off the PHY page for future writes. */
2324                 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2325                         phy_page = reg_data;
2326                         continue;
2327                 }
2328
2329                 reg_addr &= PHY_REG_MASK;
2330                 reg_addr |= phy_page;
2331
2332                 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2333                                                     reg_data);
2334                 if (ret_val)
2335                         goto release;
2336         }
2337
2338 release:
2339         hw->phy.ops.release(hw);
2340         return ret_val;
2341 }
2342
2343 /**
2344  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2345  *  @hw:   pointer to the HW structure
2346  *  @link: link up bool flag
2347  *
2348  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2349  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2350  *  If link is down, the function will restore the default K1 setting located
2351  *  in the NVM.
2352  **/
2353 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2354 {
2355         s32 ret_val = E1000_SUCCESS;
2356         u16 status_reg = 0;
2357         bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2358
2359         DEBUGFUNC("e1000_k1_gig_workaround_hv");
2360
2361         if (hw->mac.type != e1000_pchlan)
2362                 return E1000_SUCCESS;
2363
2364         /* Wrap the whole flow with the sw flag */
2365         ret_val = hw->phy.ops.acquire(hw);
2366         if (ret_val)
2367                 return ret_val;
2368
2369         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2370         if (link) {
2371                 if (hw->phy.type == e1000_phy_82578) {
2372                         ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2373                                                               &status_reg);
2374                         if (ret_val)
2375                                 goto release;
2376
2377                         status_reg &= (BM_CS_STATUS_LINK_UP |
2378                                        BM_CS_STATUS_RESOLVED |
2379                                        BM_CS_STATUS_SPEED_MASK);
2380
2381                         if (status_reg == (BM_CS_STATUS_LINK_UP |
2382                                            BM_CS_STATUS_RESOLVED |
2383                                            BM_CS_STATUS_SPEED_1000))
2384                                 k1_enable = false;
2385                 }
2386
2387                 if (hw->phy.type == e1000_phy_82577) {
2388                         ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2389                                                               &status_reg);
2390                         if (ret_val)
2391                                 goto release;
2392
2393                         status_reg &= (HV_M_STATUS_LINK_UP |
2394                                        HV_M_STATUS_AUTONEG_COMPLETE |
2395                                        HV_M_STATUS_SPEED_MASK);
2396
2397                         if (status_reg == (HV_M_STATUS_LINK_UP |
2398                                            HV_M_STATUS_AUTONEG_COMPLETE |
2399                                            HV_M_STATUS_SPEED_1000))
2400                                 k1_enable = false;
2401                 }
2402
2403                 /* Link stall fix for link up */
2404                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2405                                                        0x0100);
2406                 if (ret_val)
2407                         goto release;
2408
2409         } else {
2410                 /* Link stall fix for link down */
2411                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2412                                                        0x4100);
2413                 if (ret_val)
2414                         goto release;
2415         }
2416
2417         ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2418
2419 release:
2420         hw->phy.ops.release(hw);
2421
2422         return ret_val;
2423 }
2424
2425 /**
2426  *  e1000_configure_k1_ich8lan - Configure K1 power state
2427  *  @hw: pointer to the HW structure
2428  *  @k1_enable: K1 state to configure
2429  *
2430  *  Configure the K1 power state based on the provided parameter.
2431  *  Assumes semaphore already acquired.
2432  *
2433  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2434  **/
2435 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2436 {
2437         s32 ret_val;
2438         u32 ctrl_reg = 0;
2439         u32 ctrl_ext = 0;
2440         u32 reg = 0;
2441         u16 kmrn_reg = 0;
2442
2443         DEBUGFUNC("e1000_configure_k1_ich8lan");
2444
2445         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2446                                              &kmrn_reg);
2447         if (ret_val)
2448                 return ret_val;
2449
2450         if (k1_enable)
2451                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2452         else
2453                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2454
2455         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2456                                               kmrn_reg);
2457         if (ret_val)
2458                 return ret_val;
2459
2460         usec_delay(20);
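             /* Briefly force the MAC speed (FRCSPD with the speed bits
              * cleared) and set the speed-select bypass bit, then restore
              * the original CTRL/CTRL_EXT values; this appears to be needed
              * for the new K1 setting to take effect.
              */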
2461         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2462         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2463
2464         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2465         reg |= E1000_CTRL_FRCSPD;
2466         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2467
2468         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2469         E1000_WRITE_FLUSH(hw);
2470         usec_delay(20);
2471         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2472         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2473         E1000_WRITE_FLUSH(hw);
2474         usec_delay(20);
2475
2476         return E1000_SUCCESS;
2477 }
2478
2479 /**
2480  *  e1000_oem_bits_config_ich8lan - SW-based OEM bits configuration
2481  *  @hw:       pointer to the HW structure
2482  *  @d0_state: boolean if entering d0 or d3 device state
2483  *
2484  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2485  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2486  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2487  **/
2488 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2489 {
2490         s32 ret_val = 0;
2491         u32 mac_reg;
2492         u16 oem_reg;
2493
2494         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2495
2496         if (hw->mac.type < e1000_pchlan)
2497                 return ret_val;
2498
2499         ret_val = hw->phy.ops.acquire(hw);
2500         if (ret_val)
2501                 return ret_val;
2502
2503         if (hw->mac.type == e1000_pchlan) {
2504                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2505                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2506                         goto release;
2507         }
2508
2509         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2510         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2511                 goto release;
2512
2513         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2514
2515         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2516         if (ret_val)
2517                 goto release;
2518
2519         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2520
2521         if (d0_state) {
2522                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2523                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2524
2525                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2526                         oem_reg |= HV_OEM_BITS_LPLU;
2527         } else {
2528                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2529                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2530                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2531
2532                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2533                     E1000_PHY_CTRL_NOND0A_LPLU))
2534                         oem_reg |= HV_OEM_BITS_LPLU;
2535         }
2536
2537         /* Set Restart auto-neg to activate the bits */
2538         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2539             !hw->phy.ops.check_reset_block(hw))
2540                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2541
2542         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2543
2544 release:
2545         hw->phy.ops.release(hw);
2546
2547         return ret_val;
2548 }
2549
2550
2551 /**
2552  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2553  *  @hw:   pointer to the HW structure
2554  **/
2555 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2556 {
2557         s32 ret_val;
2558         u16 data;
2559
2560         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2561
2562         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2563         if (ret_val)
2564                 return ret_val;
2565
2566         data |= HV_KMRN_MDIO_SLOW;
2567
2568         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2569
2570         return ret_val;
2571 }
2572
2573 /**
2574  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2575  *  done after every PHY reset.
      *  @hw: pointer to the HW structure
2576  **/
2577 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2578 {
2579         s32 ret_val = E1000_SUCCESS;
2580         u16 phy_data;
2581
2582         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2583
2584         if (hw->mac.type != e1000_pchlan)
2585                 return E1000_SUCCESS;
2586
2587         /* Set MDIO slow mode before any other MDIO access */
2588         if (hw->phy.type == e1000_phy_82577) {
2589                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2590                 if (ret_val)
2591                         return ret_val;
2592         }
2593
2594         if (((hw->phy.type == e1000_phy_82577) &&
2595              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2596             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2597                 /* Disable generation of early preamble */
2598                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2599                 if (ret_val)
2600                         return ret_val;
2601
2602                 /* Preamble tuning for SSC */
2603                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2604                                                 0xA204);
2605                 if (ret_val)
2606                         return ret_val;
2607         }
2608
2609         if (hw->phy.type == e1000_phy_82578) {
2610                 /* Return registers to default by doing a soft reset then
2611                  * writing 0x3140 to the control register.
2612                  */
2613                 if (hw->phy.revision < 2) {
2614                         e1000_phy_sw_reset_generic(hw);
2615                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2616                                                         0x3140);
2617                 }
2618         }
2619
2620         /* Select page 0 */
2621         ret_val = hw->phy.ops.acquire(hw);
2622         if (ret_val)
2623                 return ret_val;
2624
2625         hw->phy.addr = 1;
2626         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2627         hw->phy.ops.release(hw);
2628         if (ret_val)
2629                 return ret_val;
2630
2631         /* Configure the K1 Si workaround during phy reset assuming there is
2632          * link so that it disables K1 if link is in 1Gbps.
2633          */
2634         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2635         if (ret_val)
2636                 return ret_val;
2637
2638         /* Workaround for link disconnects on a busy hub in half duplex */
2639         ret_val = hw->phy.ops.acquire(hw);
2640         if (ret_val)
2641                 return ret_val;
2642         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2643         if (ret_val)
2644                 goto release;
2645         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2646                                                phy_data & 0x00FF);
2647         if (ret_val)
2648                 goto release;
2649
2650         /* Raise the MSE threshold so the link stays up when noise is high */
2651         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2652 release:
2653         hw->phy.ops.release(hw);
2654
2655         return ret_val;
2656 }
2657
2658 /**
2659  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2660  *  @hw:   pointer to the HW structure
2661  **/
2662 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2663 {
2664         u32 mac_reg;
2665         u16 i, phy_reg = 0;
2666         s32 ret_val;
2667
2668         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2669
2670         ret_val = hw->phy.ops.acquire(hw);
2671         if (ret_val)
2672                 return;
2673         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2674         if (ret_val)
2675                 goto release;
2676
2677         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2678         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2679                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2680                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2681                                            (u16)(mac_reg & 0xFFFF));
2682                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2683                                            (u16)((mac_reg >> 16) & 0xFFFF));
2684
2685                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2686                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2687                                            (u16)(mac_reg & 0xFFFF));
2688                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2689                                            (u16)((mac_reg & E1000_RAH_AV)
2690                                                  >> 16));
2691         }
2692
2693         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2694
2695 release:
2696         hw->phy.ops.release(hw);
2697 }
2698
2699 #ifndef CRC32_OS_SUPPORT
2700 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2701 {
2702         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2703         u32 i, j, mask, crc;
2704
2705         DEBUGFUNC("e1000_calc_rx_da_crc");
2706
2707         crc = 0xffffffff;
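        /* Standard bit-reflected CRC-32 (polynomial 0xEDB88320): each address
         * byte is folded in LSB first, and "mask" becomes all-ones whenever
         * the current LSB is set so the polynomial is XORed in only on those
         * iterations.
         */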
2708         for (i = 0; i < 6; i++) {
2709                 crc = crc ^ mac[i];
2710                 for (j = 8; j > 0; j--) {
2711                         mask = (crc & 1) * (-1);
2712                         crc = (crc >> 1) ^ (poly & mask);
2713                 }
2714         }
2715         return ~crc;
2716 }
2717
2718 #endif /* CRC32_OS_SUPPORT */
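
/* Illustrative sketch only (kept out of the build): how the CRC helper above
 * feeds the jumbo-frame workaround below -- the 802.3 CRC of each valid
 * receive address is written to the corresponding PCH_RAICC register as its
 * initial CRC value.  The address and register index used here are
 * hypothetical.
 */
#if 0
STATIC void e1000_example_seed_raicc(struct e1000_hw *hw)
{
        u8 addr[ETH_ADDR_LEN] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };

        E1000_WRITE_REG(hw, E1000_PCH_RAICC(0), e1000_calc_rx_da_crc(addr));
}
#endif
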
2719 /**
2720  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2721  *  with 82579 PHY
2722  *  @hw: pointer to the HW structure
2723  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2724  **/
2725 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2726 {
2727         s32 ret_val = E1000_SUCCESS;
2728         u16 phy_reg, data;
2729         u32 mac_reg;
2730         u16 i;
2731
2732         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2733
2734         if (hw->mac.type < e1000_pch2lan)
2735                 return E1000_SUCCESS;
2736
2737         /* disable Rx path while enabling/disabling workaround */
2738         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2739         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2740                                         phy_reg | (1 << 14));
2741         if (ret_val)
2742                 return ret_val;
2743
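        /* Enabling the workaround programs the receive addresses and their
         * initial CRC values into both the MAC and the PHY, turns on CRC
         * stripping (RCTL.SECRC) and adjusts several KMRN and PHY registers;
         * disabling it restores the hardware defaults.
         */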
2744         if (enable) {
2745                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2746                  * SHRAL/H) and initial CRC values to the MAC
2747                  */
2748                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2749                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2750                         u32 addr_high, addr_low;
2751
2752                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2753                         if (!(addr_high & E1000_RAH_AV))
2754                                 continue;
2755                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2756                         mac_addr[0] = (addr_low & 0xFF);
2757                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2758                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2759                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2760                         mac_addr[4] = (addr_high & 0xFF);
2761                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2762
2763 #ifndef CRC32_OS_SUPPORT
2764                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2765                                         e1000_calc_rx_da_crc(mac_addr));
2766 #else /* CRC32_OS_SUPPORT */
2767                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2768                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2769 #endif /* CRC32_OS_SUPPORT */
2770                 }
2771
2772                 /* Write Rx addresses to the PHY */
2773                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2774
2775                 /* Enable jumbo frame workaround in the MAC */
2776                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2777                 mac_reg &= ~(1 << 14);
2778                 mac_reg |= (7 << 15);
2779                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2780
2781                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2782                 mac_reg |= E1000_RCTL_SECRC;
2783                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2784
2785                 ret_val = e1000_read_kmrn_reg_generic(hw,
2786                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2787                                                 &data);
2788                 if (ret_val)
2789                         return ret_val;
2790                 ret_val = e1000_write_kmrn_reg_generic(hw,
2791                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2792                                                 data | (1 << 0));
2793                 if (ret_val)
2794                         return ret_val;
2795                 ret_val = e1000_read_kmrn_reg_generic(hw,
2796                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2797                                                 &data);
2798                 if (ret_val)
2799                         return ret_val;
2800                 data &= ~(0xF << 8);
2801                 data |= (0xB << 8);
2802                 ret_val = e1000_write_kmrn_reg_generic(hw,
2803                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2804                                                 data);
2805                 if (ret_val)
2806                         return ret_val;
2807
2808                 /* Enable jumbo frame workaround in the PHY */
2809                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2810                 data &= ~(0x7F << 5);
2811                 data |= (0x37 << 5);
2812                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2813                 if (ret_val)
2814                         return ret_val;
2815                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2816                 data &= ~(1 << 13);
2817                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2818                 if (ret_val)
2819                         return ret_val;
2820                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2821                 data &= ~(0x3FF << 2);
2822                 data |= (E1000_TX_PTR_GAP << 2);
2823                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2824                 if (ret_val)
2825                         return ret_val;
2826                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2827                 if (ret_val)
2828                         return ret_val;
2829                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2830                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2831                                                 (1 << 10));
2832                 if (ret_val)
2833                         return ret_val;
2834         } else {
2835                 /* Write MAC register values back to h/w defaults */
2836                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2837                 mac_reg &= ~(0xF << 14);
2838                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2839
2840                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2841                 mac_reg &= ~E1000_RCTL_SECRC;
2842                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2843
2844                 ret_val = e1000_read_kmrn_reg_generic(hw,
2845                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2846                                                 &data);
2847                 if (ret_val)
2848                         return ret_val;
2849                 ret_val = e1000_write_kmrn_reg_generic(hw,
2850                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2851                                                 data & ~(1 << 0));
2852                 if (ret_val)
2853                         return ret_val;
2854                 ret_val = e1000_read_kmrn_reg_generic(hw,
2855                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2856                                                 &data);
2857                 if (ret_val)
2858                         return ret_val;
2859                 data &= ~(0xF << 8);
2860                 data |= (0xB << 8);
2861                 ret_val = e1000_write_kmrn_reg_generic(hw,
2862                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2863                                                 data);
2864                 if (ret_val)
2865                         return ret_val;
2866
2867                 /* Write PHY register values back to h/w defaults */
2868                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2869                 data &= ~(0x7F << 5);
2870                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2871                 if (ret_val)
2872                         return ret_val;
2873                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2874                 data |= (1 << 13);
2875                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2876                 if (ret_val)
2877                         return ret_val;
2878                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2879                 data &= ~(0x3FF << 2);
2880                 data |= (0x8 << 2);
2881                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2882                 if (ret_val)
2883                         return ret_val;
2884                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2885                 if (ret_val)
2886                         return ret_val;
2887                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2888                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2889                                                 ~(1 << 10));
2890                 if (ret_val)
2891                         return ret_val;
2892         }
2893
2894         /* re-enable Rx path after enabling/disabling workaround */
2895         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2896                                      ~(1 << 14));
2897 }
2898
2899 /**
2900  *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds to be done after every PHY reset
2901  *  @hw: pointer to the HW structure
2902  **/
2903 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2904 {
2905         s32 ret_val = E1000_SUCCESS;
2906
2907         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2908
2909         if (hw->mac.type != e1000_pch2lan)
2910                 return E1000_SUCCESS;
2911
2912         /* Set MDIO slow mode before any other MDIO access */
2913         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2914         if (ret_val)
2915                 return ret_val;
2916
2917         ret_val = hw->phy.ops.acquire(hw);
2918         if (ret_val)
2919                 return ret_val;
2920         /* set MSE higher to enable link to stay up when noise is high */
2921         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2922         if (ret_val)
2923                 goto release;
2924         /* drop the link after the MSE threshold has been reached 5 times */
2925         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2926 release:
2927         hw->phy.ops.release(hw);
2928
2929         return ret_val;
2930 }
2931
2932 /**
2933  *  e1000_k1_workaround_lv - K1 Si workaround
2934  *  @hw:   pointer to the HW structure
2935  *
2936  *  Workaround to set the K1 beacon duration for 82579 parts linked at
2937  *  10Mbps; K1 is disabled for 1000 and 100 Mbps speeds.
2938  **/
2939 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2940 {
2941         s32 ret_val = E1000_SUCCESS;
2942         u16 status_reg = 0;
2943
2944         DEBUGFUNC("e1000_k1_workaround_lv");
2945
2946         if (hw->mac.type != e1000_pch2lan)
2947                 return E1000_SUCCESS;
2948
2949         /* Set K1 beacon duration based on 10Mbps speed */
2950         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2951         if (ret_val)
2952                 return ret_val;
2953
2954         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2955             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2956                 if (status_reg &
2957                     (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2958                         u16 pm_phy_reg;
2959
2960                         /* LV 1G/100 packet drop issue workaround */
2961                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2962                                                        &pm_phy_reg);
2963                         if (ret_val)
2964                                 return ret_val;
2965                         pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2966                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2967                                                         pm_phy_reg);
2968                         if (ret_val)
2969                                 return ret_val;
2970                 } else {
2971                         u32 mac_reg;
2972                         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2973                         mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2974                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2975                         E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2976                 }
2977         }
2978
2979         return ret_val;
2980 }
2981
2982 /**
2983  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2984  *  @hw:   pointer to the HW structure
2985  *  @gate: boolean set to true to gate, false to ungate
2986  *
2987  *  Gate/ungate the automatic PHY configuration via hardware; perform
2988  *  the configuration via software instead.
2989  **/
2990 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2991 {
2992         u32 extcnf_ctrl;
2993
2994         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2995
2996         if (hw->mac.type < e1000_pch2lan)
2997                 return;
2998
2999         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3000
3001         if (gate)
3002                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3003         else
3004                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3005
3006         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3007 }
3008
3009 /**
3010  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
3011  *  @hw: pointer to the HW structure
3012  *
3013  *  Check the appropriate indication the MAC has finished configuring the
3014  *  PHY after a software reset.
3015  **/
3016 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3017 {
3018         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3019
3020         DEBUGFUNC("e1000_lan_init_done_ich8lan");
3021
3022                 /* Wait for basic configuration to complete before proceeding */
3023         do {
3024                 data = E1000_READ_REG(hw, E1000_STATUS);
3025                 data &= E1000_STATUS_LAN_INIT_DONE;
3026                 usec_delay(100);
3027         } while ((!data) && --loop);
3028
3029         /* If basic configuration is incomplete before the above loop
3030          * count reaches 0, loading the configuration from NVM will
3031          * leave the PHY in a bad state possibly resulting in no link.
3032          */
3033         if (loop == 0)
3034                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3035
3036         /* Clear the Init Done bit for the next init event */
3037         data = E1000_READ_REG(hw, E1000_STATUS);
3038         data &= ~E1000_STATUS_LAN_INIT_DONE;
3039         E1000_WRITE_REG(hw, E1000_STATUS, data);
3040 }
3041
3042 /**
3043  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3044  *  @hw: pointer to the HW structure
3045  **/
3046 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3047 {
3048         s32 ret_val = E1000_SUCCESS;
3049         u16 reg;
3050
3051         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3052
3053         if (hw->phy.ops.check_reset_block(hw))
3054                 return E1000_SUCCESS;
3055
3056         /* Allow time for h/w to get to quiescent state after reset */
3057         msec_delay(10);
3058
3059         /* Perform any necessary post-reset workarounds */
3060         switch (hw->mac.type) {
3061         case e1000_pchlan:
3062                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3063                 if (ret_val)
3064                         return ret_val;
3065                 break;
3066         case e1000_pch2lan:
3067                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3068                 if (ret_val)
3069                         return ret_val;
3070                 break;
3071         default:
3072                 break;
3073         }
3074
3075         /* Clear the host wakeup bit after lcd reset */
3076         if (hw->mac.type >= e1000_pchlan) {
3077                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3078                 reg &= ~BM_WUC_HOST_WU_BIT;
3079                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3080         }
3081
3082         /* Configure the LCD with the extended configuration region in NVM */
3083         ret_val = e1000_sw_lcd_config_ich8lan(hw);
3084         if (ret_val)
3085                 return ret_val;
3086
3087         /* Configure the LCD with the OEM bits in NVM */
3088         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
3089
3090         if (hw->mac.type == e1000_pch2lan) {
3091                 /* Ungate automatic PHY configuration on non-managed 82579 */
3092                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
3093                     E1000_ICH_FWSM_FW_VALID)) {
3094                         msec_delay(10);
3095                         e1000_gate_hw_phy_config_ich8lan(hw, false);
3096                 }
3097
3098                 /* Set EEE LPI Update Timer to 200usec */
3099                 ret_val = hw->phy.ops.acquire(hw);
3100                 if (ret_val)
3101                         return ret_val;
3102                 ret_val = e1000_write_emi_reg_locked(hw,
3103                                                      I82579_LPI_UPDATE_TIMER,
3104                                                      0x1387);
3105                 hw->phy.ops.release(hw);
3106         }
3107
3108         return ret_val;
3109 }
3110
3111 /**
3112  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3113  *  @hw: pointer to the HW structure
3114  *
3115  *  Resets the PHY
3116  *  This is a function pointer entry point called by drivers
3117  *  or other shared routines.
3118  **/
3119 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3120 {
3121         s32 ret_val = E1000_SUCCESS;
3122
3123         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3124
3125         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3126         if ((hw->mac.type == e1000_pch2lan) &&
3127             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3128                 e1000_gate_hw_phy_config_ich8lan(hw, true);
3129
3130         ret_val = e1000_phy_hw_reset_generic(hw);
3131         if (ret_val)
3132                 return ret_val;
3133
3134         return e1000_post_phy_reset_ich8lan(hw);
3135 }
3136
3137 /**
3138  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3139  *  @hw: pointer to the HW structure
3140  *  @active: true to enable LPLU, false to disable
3141  *
3142  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
3143  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
3144  *  the phy speed. This function will manually set the LPLU bit and restart
3145  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
3146  *  since it configures the same bit.
3147  **/
3148 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3149 {
3150         s32 ret_val;
3151         u16 oem_reg;
3152
3153         DEBUGFUNC("e1000_set_lplu_state_pchlan");
3154         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3155         if (ret_val)
3156                 return ret_val;
3157
3158         if (active)
3159                 oem_reg |= HV_OEM_BITS_LPLU;
3160         else
3161                 oem_reg &= ~HV_OEM_BITS_LPLU;
3162
3163         if (!hw->phy.ops.check_reset_block(hw))
3164                 oem_reg |= HV_OEM_BITS_RESTART_AN;
3165
3166         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3167 }
3168
3169 /**
3170  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3171  *  @hw: pointer to the HW structure
3172  *  @active: true to enable LPLU, false to disable
3173  *
3174  *  Sets the LPLU D0 state according to the active flag.  When
3175  *  activating LPLU this function also disables smart speed
3176  *  and vice versa.  LPLU will not be activated unless the
3177  *  device autonegotiation advertisement meets standards of
3178  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3179  *  This is a function pointer entry point only called by
3180  *  PHY setup routines.
3181  **/
3182 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3183 {
3184         struct e1000_phy_info *phy = &hw->phy;
3185         u32 phy_ctrl;
3186         s32 ret_val = E1000_SUCCESS;
3187         u16 data;
3188
3189         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3190
3191         if (phy->type == e1000_phy_ife)
3192                 return E1000_SUCCESS;
3193
3194         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3195
3196         if (active) {
3197                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3198                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3199
3200                 if (phy->type != e1000_phy_igp_3)
3201                         return E1000_SUCCESS;
3202
3203                 /* Call gig speed drop workaround on LPLU before accessing
3204                  * any PHY registers
3205                  */
3206                 if (hw->mac.type == e1000_ich8lan)
3207                         e1000_gig_downshift_workaround_ich8lan(hw);
3208
3209                 /* When LPLU is enabled, we should disable SmartSpeed */
3210                 ret_val = phy->ops.read_reg(hw,
3211                                             IGP01E1000_PHY_PORT_CONFIG,
3212                                             &data);
3213                 if (ret_val)
3214                         return ret_val;
3215                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3216                 ret_val = phy->ops.write_reg(hw,
3217                                              IGP01E1000_PHY_PORT_CONFIG,
3218                                              data);
3219                 if (ret_val)
3220                         return ret_val;
3221         } else {
3222                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3223                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3224
3225                 if (phy->type != e1000_phy_igp_3)
3226                         return E1000_SUCCESS;
3227
3228                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3229                  * during Dx states where the power conservation is most
3230                  * important.  During driver activity we should enable
3231                  * SmartSpeed, so performance is maintained.
3232                  */
3233                 if (phy->smart_speed == e1000_smart_speed_on) {
3234                         ret_val = phy->ops.read_reg(hw,
3235                                                     IGP01E1000_PHY_PORT_CONFIG,
3236                                                     &data);
3237                         if (ret_val)
3238                                 return ret_val;
3239
3240                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3241                         ret_val = phy->ops.write_reg(hw,
3242                                                      IGP01E1000_PHY_PORT_CONFIG,
3243                                                      data);
3244                         if (ret_val)
3245                                 return ret_val;
3246                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3247                         ret_val = phy->ops.read_reg(hw,
3248                                                     IGP01E1000_PHY_PORT_CONFIG,
3249                                                     &data);
3250                         if (ret_val)
3251                                 return ret_val;
3252
3253                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3254                         ret_val = phy->ops.write_reg(hw,
3255                                                      IGP01E1000_PHY_PORT_CONFIG,
3256                                                      data);
3257                         if (ret_val)
3258                                 return ret_val;
3259                 }
3260         }
3261
3262         return E1000_SUCCESS;
3263 }
3264
3265 /**
3266  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3267  *  @hw: pointer to the HW structure
3268  *  @active: true to enable LPLU, false to disable
3269  *
3270  *  Sets the LPLU D3 state according to the active flag.  When
3271  *  activating LPLU this function also disables smart speed
3272  *  and vice versa.  LPLU will not be activated unless the
3273  *  device autonegotiation advertisement meets standards of
3274  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3275  *  This is a function pointer entry point only called by
3276  *  PHY setup routines.
3277  **/
3278 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3279 {
3280         struct e1000_phy_info *phy = &hw->phy;
3281         u32 phy_ctrl;
3282         s32 ret_val = E1000_SUCCESS;
3283         u16 data;
3284
3285         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3286
3287         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3288
3289         if (!active) {
3290                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3291                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3292
3293                 if (phy->type != e1000_phy_igp_3)
3294                         return E1000_SUCCESS;
3295
3296                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3297                  * during Dx states where the power conservation is most
3298                  * important.  During driver activity we should enable
3299                  * SmartSpeed, so performance is maintained.
3300                  */
3301                 if (phy->smart_speed == e1000_smart_speed_on) {
3302                         ret_val = phy->ops.read_reg(hw,
3303                                                     IGP01E1000_PHY_PORT_CONFIG,
3304                                                     &data);
3305                         if (ret_val)
3306                                 return ret_val;
3307
3308                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3309                         ret_val = phy->ops.write_reg(hw,
3310                                                      IGP01E1000_PHY_PORT_CONFIG,
3311                                                      data);
3312                         if (ret_val)
3313                                 return ret_val;
3314                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3315                         ret_val = phy->ops.read_reg(hw,
3316                                                     IGP01E1000_PHY_PORT_CONFIG,
3317                                                     &data);
3318                         if (ret_val)
3319                                 return ret_val;
3320
3321                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3322                         ret_val = phy->ops.write_reg(hw,
3323                                                      IGP01E1000_PHY_PORT_CONFIG,
3324                                                      data);
3325                         if (ret_val)
3326                                 return ret_val;
3327                 }
3328         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3329                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3330                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3331                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3332                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3333
3334                 if (phy->type != e1000_phy_igp_3)
3335                         return E1000_SUCCESS;
3336
3337                 /* Call gig speed drop workaround on LPLU before accessing
3338                  * any PHY registers
3339                  */
3340                 if (hw->mac.type == e1000_ich8lan)
3341                         e1000_gig_downshift_workaround_ich8lan(hw);
3342
3343                 /* When LPLU is enabled, we should disable SmartSpeed */
3344                 ret_val = phy->ops.read_reg(hw,
3345                                             IGP01E1000_PHY_PORT_CONFIG,
3346                                             &data);
3347                 if (ret_val)
3348                         return ret_val;
3349
3350                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3351                 ret_val = phy->ops.write_reg(hw,
3352                                              IGP01E1000_PHY_PORT_CONFIG,
3353                                              data);
3354         }
3355
3356         return ret_val;
3357 }
3358
3359 /**
3360  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3361  *  @hw: pointer to the HW structure
3362  *  @bank:  pointer to the variable that returns the active bank
3363  *
3364  *  Reads signature byte from the NVM using the flash access registers.
3365  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3366  **/
3367 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3368 {
3369         u32 eecd;
3370         struct e1000_nvm_info *nvm = &hw->nvm;
3371         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3372         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3373         u32 nvm_dword = 0;
3374         u8 sig_byte = 0;
3375         s32 ret_val;
3376
3377         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3378
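        /* act_offset points at the byte holding the signature: NVM word 0x13
         * converted to a byte offset (*2), +1 to select the high byte where
         * bits 15:14 live.  The SPT case below overrides this with a word
         * offset because the dword read helper converts to bytes itself.
         */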
3379         switch (hw->mac.type) {
3380         case e1000_pch_spt:
3381                 bank1_offset = nvm->flash_bank_size;
3382                 act_offset = E1000_ICH_NVM_SIG_WORD;
3383
3384                 /* set bank to 0 in case flash read fails */
3385                 *bank = 0;
3386
3387                 /* Check bank 0 */
3388                 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3389                                                          &nvm_dword);
3390                 if (ret_val)
3391                         return ret_val;
3392                 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3393                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3394                     E1000_ICH_NVM_SIG_VALUE) {
3395                         *bank = 0;
3396                         return E1000_SUCCESS;
3397                 }
3398
3399                 /* Check bank 1 */
3400                 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3401                                                          bank1_offset,
3402                                                          &nvm_dword);
3403                 if (ret_val)
3404                         return ret_val;
3405                 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3406                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3407                     E1000_ICH_NVM_SIG_VALUE) {
3408                         *bank = 1;
3409                         return E1000_SUCCESS;
3410                 }
3411
3412                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3413                 return -E1000_ERR_NVM;
3414         case e1000_ich8lan:
3415         case e1000_ich9lan:
3416                 eecd = E1000_READ_REG(hw, E1000_EECD);
3417                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3418                     E1000_EECD_SEC1VAL_VALID_MASK) {
3419                         if (eecd & E1000_EECD_SEC1VAL)
3420                                 *bank = 1;
3421                         else
3422                                 *bank = 0;
3423
3424                         return E1000_SUCCESS;
3425                 }
3426                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3427                 /* fall-thru */
3428         default:
3429                 /* set bank to 0 in case flash read fails */
3430                 *bank = 0;
3431
3432                 /* Check bank 0 */
3433                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3434                                                         &sig_byte);
3435                 if (ret_val)
3436                         return ret_val;
3437                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3438                     E1000_ICH_NVM_SIG_VALUE) {
3439                         *bank = 0;
3440                         return E1000_SUCCESS;
3441                 }
3442
3443                 /* Check bank 1 */
3444                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3445                                                         bank1_offset,
3446                                                         &sig_byte);
3447                 if (ret_val)
3448                         return ret_val;
3449                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3450                     E1000_ICH_NVM_SIG_VALUE) {
3451                         *bank = 1;
3452                         return E1000_SUCCESS;
3453                 }
3454
3455                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3456                 return -E1000_ERR_NVM;
3457         }
3458 }
3459
3460 /**
3461  *  e1000_read_nvm_spt - NVM access for SPT
3462  *  @hw: pointer to the HW structure
3463  *  @offset: The offset (in words) of the word(s) to read.
3464  *  @words: Size of data to read in words.
3465  *  @data: pointer to the word(s) to read at offset.
3466  *
3467  *  Reads a word(s) from the NVM
3468  **/
3469 STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3470                               u16 *data)
3471 {
3472         struct e1000_nvm_info *nvm = &hw->nvm;
3473         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3474         u32 act_offset;
3475         s32 ret_val = E1000_SUCCESS;
3476         u32 bank = 0;
3477         u32 dword = 0;
3478         u16 offset_to_read;
3479         u16 i;
3480
3481         DEBUGFUNC("e1000_read_nvm_spt");
3482
3483         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3484             (words == 0)) {
3485                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3486                 ret_val = -E1000_ERR_NVM;
3487                 goto out;
3488         }
3489
3490         nvm->ops.acquire(hw);
3491
3492         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3493         if (ret_val != E1000_SUCCESS) {
3494                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3495                 bank = 0;
3496         }
3497
3498         act_offset = (bank) ? nvm->flash_bank_size : 0;
3499         act_offset += offset;
3500
3501         ret_val = E1000_SUCCESS;
3502
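        /* SPT flash is only dword-accessible: fetch 32 bits at a time and
         * split them into two 16-bit words, preferring the shadow RAM value
         * for any word with a pending (modified) write.  A single trailing
         * word reads the dword containing it and picks the correct half
         * based on the offset parity.
         */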
3503         for (i = 0; i < words; i += 2) {
3504                 if (words - i == 1) {
3505                         if (dev_spec->shadow_ram[offset+i].modified) {
3506                                 data[i] = dev_spec->shadow_ram[offset+i].value;
3507                         } else {
3508                                 offset_to_read = act_offset + i -
3509                                                  ((act_offset + i) % 2);
3510                                 ret_val =
3511                                    e1000_read_flash_dword_ich8lan(hw,
3512                                                                  offset_to_read,
3513                                                                  &dword);
3514                                 if (ret_val)
3515                                         break;
3516                                 if ((act_offset + i) % 2 == 0)
3517                                         data[i] = (u16)(dword & 0xFFFF);
3518                                 else
3519                                         data[i] = (u16)((dword >> 16) & 0xFFFF);
3520                         }
3521                 } else {
3522                         offset_to_read = act_offset + i;
3523                         if (!(dev_spec->shadow_ram[offset+i].modified) ||
3524                             !(dev_spec->shadow_ram[offset+i+1].modified)) {
3525                                 ret_val =
3526                                    e1000_read_flash_dword_ich8lan(hw,
3527                                                                  offset_to_read,
3528                                                                  &dword);
3529                                 if (ret_val)
3530                                         break;
3531                         }
3532                         if (dev_spec->shadow_ram[offset+i].modified)
3533                                 data[i] = dev_spec->shadow_ram[offset+i].value;
3534                         else
3535                                 data[i] = (u16) (dword & 0xFFFF);
3536                         if (dev_spec->shadow_ram[offset+i].modified)
3537                                 data[i+1] =
3538                                    dev_spec->shadow_ram[offset+i+1].value;
3539                         else
3540                                 data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3541                 }
3542         }
3543
3544         nvm->ops.release(hw);
3545
3546 out:
3547         if (ret_val)
3548                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3549
3550         return ret_val;
3551 }
3552
3553 /**
3554  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3555  *  @hw: pointer to the HW structure
3556  *  @offset: The offset (in words) of the word(s) to read.
3557  *  @words: Size of data to read in words
3558  *  @data: Pointer to the word(s) to read at offset.
3559  *
3560  *  Reads a word(s) from the NVM using the flash access registers.
3561  **/
3562 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3563                                   u16 *data)
3564 {
3565         struct e1000_nvm_info *nvm = &hw->nvm;
3566         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3567         u32 act_offset;
3568         s32 ret_val = E1000_SUCCESS;
3569         u32 bank = 0;
3570         u16 i, word;
3571
3572         DEBUGFUNC("e1000_read_nvm_ich8lan");
3573
3574         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3575             (words == 0)) {
3576                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3577                 ret_val = -E1000_ERR_NVM;
3578                 goto out;
3579         }
3580
3581         nvm->ops.acquire(hw);
3582
3583         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3584         if (ret_val != E1000_SUCCESS) {
3585                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3586                 bank = 0;
3587         }
3588
3589         act_offset = (bank) ? nvm->flash_bank_size : 0;
3590         act_offset += offset;
3591
3592         ret_val = E1000_SUCCESS;
3593         for (i = 0; i < words; i++) {
3594                 if (dev_spec->shadow_ram[offset+i].modified) {
3595                         data[i] = dev_spec->shadow_ram[offset+i].value;
3596                 } else {
3597                         ret_val = e1000_read_flash_word_ich8lan(hw,
3598                                                                 act_offset + i,
3599                                                                 &word);
3600                         if (ret_val)
3601                                 break;
3602                         data[i] = word;
3603                 }
3604         }
3605
3606         nvm->ops.release(hw);
3607
3608 out:
3609         if (ret_val)
3610                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3611
3612         return ret_val;
3613 }
3614
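/* Illustrative sketch only (kept out of the build): callers normally reach
 * the NVM read paths above through the nvm ops table rather than calling
 * them directly.  The word offset and count below are hypothetical.
 */
#if 0
STATIC s32 e1000_example_read_nvm_words(struct e1000_hw *hw, u16 *buf)
{
        /* read two 16-bit words starting at word offset 0x10 */
        return hw->nvm.ops.read(hw, 0x10, 2, buf);
}
#endif
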
3615 /**
3616  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3617  *  @hw: pointer to the HW structure
3618  *
3619  *  This function does initial flash setup so that a new read/write/erase cycle
3620  *  can be started.
3621  **/
3622 STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3623 {
3624         union ich8_hws_flash_status hsfsts;
3625         s32 ret_val = -E1000_ERR_NVM;
3626
3627         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3628
3629         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3630
3631         /* Check if the flash descriptor is valid */
3632         if (!hsfsts.hsf_status.fldesvalid) {
3633                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3634                 return -E1000_ERR_NVM;
3635         }
3636
3637         /* Clear FCERR and DAEL in hw status by writing 1 */
3638         hsfsts.hsf_status.flcerr = 1;
3639         hsfsts.hsf_status.dael = 1;
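        /* On SPT the flash registers live in LAN memory space and only
         * 32-bit accesses are supported, so the 16-bit status value is
         * written through the 32-bit register macro.
         */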
3640         if (hw->mac.type >= e1000_pch_spt)
3641                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3642                                       hsfsts.regval & 0xFFFF);
3643         else
3644                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3645
3646         /* Either the hardware must expose an SPI cycle-in-progress
3647          * bit that can be checked before starting a new cycle, or
3648          * the FDONE bit must be set to 1 by a hardware reset so
3649          * that it can then be used as an indication of whether a
3650          * cycle is currently in progress or has already been
3651          * completed.
3652          */
3653
3654         if (!hsfsts.hsf_status.flcinprog) {
3655                 /* There is no cycle running at present,
3656                  * so we can start a cycle.
3657                  * Begin by setting Flash Cycle Done.
3658                  */
3659                 hsfsts.hsf_status.flcdone = 1;
3660                 if (hw->mac.type >= e1000_pch_spt)
3661                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3662                                               hsfsts.regval & 0xFFFF);
3663                 else
3664                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3665                                                 hsfsts.regval);
3666                 ret_val = E1000_SUCCESS;
3667         } else {
3668                 s32 i;
3669
3670                 /* Otherwise poll for sometime so the current
3671                  * cycle has a chance to end before giving up.
3672                  */
3673                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3674                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3675                                                               ICH_FLASH_HSFSTS);
3676                         if (!hsfsts.hsf_status.flcinprog) {
3677                                 ret_val = E1000_SUCCESS;
3678                                 break;
3679                         }
3680                         usec_delay(1);
3681                 }
3682                 if (ret_val == E1000_SUCCESS) {
3683                         /* Successful in waiting for previous cycle to timeout,
3684                          * now set the Flash Cycle Done.
3685                          */
3686                         hsfsts.hsf_status.flcdone = 1;
3687                         if (hw->mac.type >= e1000_pch_spt)
3688                                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3689                                                       hsfsts.regval & 0xFFFF);
3690                         else
3691                                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3692                                                         hsfsts.regval);
3693                 } else {
3694                         DEBUGOUT("Flash controller busy, cannot get access\n");
3695                 }
3696         }
3697
3698         return ret_val;
3699 }
3700
3701 /**
3702  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3703  *  @hw: pointer to the HW structure
3704  *  @timeout: maximum time to wait for completion
3705  *
3706  *  This function starts a flash cycle and waits for its completion.
3707  **/
3708 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3709 {
3710         union ich8_hws_flash_ctrl hsflctl;
3711         union ich8_hws_flash_status hsfsts;
3712         u32 i = 0;
3713
3714         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3715
3716         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
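        /* On SPT the flash control word occupies the upper 16 bits of the
         * 32-bit HSFSTS register, hence the shift by 16 on read and write.
         */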
3717         if (hw->mac.type >= e1000_pch_spt)
3718                 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3719         else
3720                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3721         hsflctl.hsf_ctrl.flcgo = 1;
3722
3723         if (hw->mac.type >= e1000_pch_spt)
3724                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3725                                       hsflctl.regval << 16);
3726         else
3727                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3728
3729         /* wait till FDONE bit is set to 1 */
3730         do {
3731                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3732                 if (hsfsts.hsf_status.flcdone)
3733                         break;
3734                 usec_delay(1);
3735         } while (i++ < timeout);
3736
3737         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3738                 return E1000_SUCCESS;
3739
3740         return -E1000_ERR_NVM;
3741 }
3742
3743 /**
3744  *  e1000_read_flash_dword_ich8lan - Read dword from flash
3745  *  @hw: pointer to the HW structure
3746  *  @offset: offset to data location
3747  *  @data: pointer to the location for storing the data
3748  *
3749  *  Reads the flash dword at offset into data.  Offset is converted
3750  *  to bytes before read.
3751  **/
3752 STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3753                                           u32 *data)
3754 {
3755         DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3756
3757         if (!data)
3758                 return -E1000_ERR_NVM;
3759
3760         /* Must convert word offset into bytes. */
3761         offset <<= 1;
3762
3763         return e1000_read_flash_data32_ich8lan(hw, offset, data);
3764 }
3765
3766 /**
3767  *  e1000_read_flash_word_ich8lan - Read word from flash
3768  *  @hw: pointer to the HW structure
3769  *  @offset: offset to data location
3770  *  @data: pointer to the location for storing the data
3771  *
3772  *  Reads the flash word at offset into data.  Offset is converted
3773  *  to bytes before read.
3774  **/
3775 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3776                                          u16 *data)
3777 {
3778         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3779
3780         if (!data)
3781                 return -E1000_ERR_NVM;
3782
3783         /* Must convert offset into bytes. */
3784         offset <<= 1;
3785
3786         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3787 }
3788
3789 /**
3790  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3791  *  @hw: pointer to the HW structure
3792  *  @offset: The offset of the byte to read.
3793  *  @data: Pointer to a byte to store the value read.
3794  *
3795  *  Reads a single byte from the NVM using the flash access registers.
3796  **/
3797 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3798                                          u8 *data)
3799 {
3800         s32 ret_val;
3801         u16 word = 0;
3802
3803         /* In SPT, only 32-bit access is supported,
3804          * so this function should not be called.
3805          */
3806         if (hw->mac.type >= e1000_pch_spt)
3807                 return -E1000_ERR_NVM;
3808         else
3809                 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3810
3811         if (ret_val)
3812                 return ret_val;
3813
3814         *data = (u8)word;
3815
3816         return E1000_SUCCESS;
3817 }
3818
3819 /**
3820  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3821  *  @hw: pointer to the HW structure
3822  *  @offset: The offset (in bytes) of the byte or word to read.
3823  *  @size: Size of data to read, 1=byte 2=word
3824  *  @data: Pointer to the word to store the value read.
3825  *
3826  *  Reads a byte or word from the NVM using the flash access registers.
3827  **/
3828 STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3829                                          u8 size, u16 *data)
3830 {
3831         union ich8_hws_flash_status hsfsts;
3832         union ich8_hws_flash_ctrl hsflctl;
3833         u32 flash_linear_addr;
3834         u32 flash_data = 0;
3835         s32 ret_val = -E1000_ERR_NVM;
3836         u8 count = 0;
3837
3838         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3839
3840         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3841                 return -E1000_ERR_NVM;
3842         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3843                              hw->nvm.flash_base_addr);
3844
3845         do {
3846                 usec_delay(1);
3847                 /* Steps */
3848                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3849                 if (ret_val != E1000_SUCCESS)
3850                         break;
3851                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3852
3853                 /* 0b/1b corresponds to 1- or 2-byte size, respectively. */
3854                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3855                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3856                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3857                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3858
3859                 ret_val = e1000_flash_cycle_ich8lan(hw,
3860                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
3861
3862                 /* Check if FCERR is set to 1; if so, clear it and try
3863                  * the whole sequence a few more times.  Otherwise read
3864                  * the result out of the Flash Data0 register, least
3865                  * significant byte first.
3866                  */
3867                 if (ret_val == E1000_SUCCESS) {
3868                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3869                         if (size == 1)
3870                                 *data = (u8)(flash_data & 0x000000FF);
3871                         else if (size == 2)
3872                                 *data = (u16)(flash_data & 0x0000FFFF);
3873                         break;
3874                 } else {
3875                         /* If we've gotten here, then things are probably
3876                          * completely hosed, but if the error condition is
3877                          * detected, it won't hurt to give it another try...
3878                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3879                          */
3880                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3881                                                               ICH_FLASH_HSFSTS);
3882                         if (hsfsts.hsf_status.flcerr) {
3883                                 /* Repeat for some time before giving up. */
3884                                 continue;
3885                         } else if (!hsfsts.hsf_status.flcdone) {
3886                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3887                                 break;
3888                         }
3889                 }
3890         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3891
3892         return ret_val;
3893 }
3894
3895 /**
3896  *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3897  *  @hw: pointer to the HW structure
3898  *  @offset: The offset (in bytes) of the dword to read.
3899  *  @data: Pointer to the dword to store the value read.
3900  *
3901  *  Reads a dword from the NVM using the flash access registers.
3902  **/
3903 STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3904                                            u32 *data)
3905 {
3906         union ich8_hws_flash_status hsfsts;
3907         union ich8_hws_flash_ctrl hsflctl;
3908         u32 flash_linear_addr;
3909         s32 ret_val = -E1000_ERR_NVM;
3910         u8 count = 0;
3911
3912         DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3913
3914         if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3915             hw->mac.type < e1000_pch_spt)
3916                 return -E1000_ERR_NVM;
3917         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3918                              hw->nvm.flash_base_addr);
3919
3920         do {
3921                 usec_delay(1);
3922                 /* Steps */
3923                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3924                 if (ret_val != E1000_SUCCESS)
3925                         break;
3926                 /* In SPT, this register is in LAN memory space, not flash.
3927                  * Therefore, only 32-bit access is supported.
3928                  */
3929                 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3930
3931                 /* fldbcount is the transfer size minus one; 3 selects a dword. */
3932                 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3933                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3934                 /* In SPT, this register is in LAN memory space, not flash.
3935                  * Therefore, only 32-bit access is supported.
3936                  */
3937                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3938                                       (u32)hsflctl.regval << 16);
3939                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3940
3941                 ret_val = e1000_flash_cycle_ich8lan(hw,
3942                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
3943
3944                 /* Check if FCERR is set to 1; if so, clear it and try
3945                  * the whole sequence a few more times.  Otherwise read
3946                  * the dword result out of the Flash Data0 register,
3947                  * least significant byte first.
3948                  */
3949                 if (ret_val == E1000_SUCCESS) {
3950                         *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3951                         break;
3952                 } else {
3953                         /* If we've gotten here, then things are probably
3954                          * completely hosed, but if the error condition is
3955                          * detected, it won't hurt to give it another try...
3956                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3957                          */
3958                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3959                                                               ICH_FLASH_HSFSTS);
3960                         if (hsfsts.hsf_status.flcerr) {
3961                                 /* Repeat for some time before giving up. */
3962                                 continue;
3963                         } else if (!hsfsts.hsf_status.flcdone) {
3964                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3965                                 break;
3966                         }
3967                 }
3968         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3969
3970         return ret_val;
3971 }
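
/* Illustrative sketch, not part of the driver and kept out of the build: a
 * minimal caller of e1000_read_flash_data32_ich8lan() above.  It assumes the
 * NVM has already been acquired through hw->nvm.ops.acquire() and that the
 * part is PCH SPT or newer, as the function itself requires.  The helper name
 * and word offset are hypothetical; the real callers are the dword read
 * helpers such as e1000_read_flash_dword_ich8lan() elsewhere in this file.
 */
#if 0
STATIC s32 e1000_example_read_nvm_dword(struct e1000_hw *hw, u32 word_offset,
                                        u32 *dword)
{
        /* The flash data helpers take byte offsets; NVM words are 16 bits,
         * so convert the word offset into a byte offset first.
         */
        u32 byte_offset = word_offset << 1;

        if (hw->mac.type < e1000_pch_spt)
                return -E1000_ERR_NVM;

        return e1000_read_flash_data32_ich8lan(hw, byte_offset, dword);
}
#endif /* illustrative sketch */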
3972
3973 /**
3974  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3975  *  @hw: pointer to the HW structure
3976  *  @offset: The offset (in words) of the word(s) to write.
3977  *  @words: Size of data to write in words
3978  *  @data: Pointer to the word(s) to write at offset.
3979  *
3980  *  Stages the word(s) in the shadow RAM; the NVM update routine commits them.
3981  **/
3982 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3983                                    u16 *data)
3984 {
3985         struct e1000_nvm_info *nvm = &hw->nvm;
3986         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3987         u16 i;
3988
3989         DEBUGFUNC("e1000_write_nvm_ich8lan");
3990
3991         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3992             (words == 0)) {
3993                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3994                 return -E1000_ERR_NVM;
3995         }
3996
3997         nvm->ops.acquire(hw);
3998
3999         for (i = 0; i < words; i++) {
4000                 dev_spec->shadow_ram[offset+i].modified = true;
4001                 dev_spec->shadow_ram[offset+i].value = data[i];
4002         }
4003
4004         nvm->ops.release(hw);
4005
4006         return E1000_SUCCESS;
4007 }
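
/* Illustrative sketch, not built: e1000_write_nvm_ich8lan() above only stages
 * words in dev_spec->shadow_ram; nothing reaches the flash until the update
 * (checksum) routine commits the modified words to the inactive bank.  This
 * assumes hw->nvm.ops.write and hw->nvm.ops.update are wired to the ich8lan
 * handlers, as this driver's ops tables set them up; the helper name and the
 * word being written are hypothetical.
 */
#if 0
STATIC s32 e1000_example_update_nvm_word(struct e1000_hw *hw, u16 offset,
                                         u16 value)
{
        s32 ret_val;

        /* Stage the new value in the shadow RAM... */
        ret_val = hw->nvm.ops.write(hw, offset, 1, &value);
        if (ret_val)
                return ret_val;

        /* ...then commit it: erase the inactive bank, program the merged
         * contents and flip the bank signature words.
         */
        return hw->nvm.ops.update(hw);
}
#endif /* illustrative sketch */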
4008
4009 /**
4010  *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
4011  *  @hw: pointer to the HW structure
4012  *
4013  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4014  *  which writes the checksum to the shadow ram.  The changes in the shadow
4015  *  ram are then committed to the EEPROM by processing each bank at a time
4016  *  checking for the modified bit and writing only the pending changes.
4017  *  After a successful commit, the shadow ram is cleared and is ready for
4018  *  future writes.
4019  **/
4020 STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4021 {
4022         struct e1000_nvm_info *nvm = &hw->nvm;
4023         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4024         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4025         s32 ret_val;
4026         u32 dword = 0;
4027
4028         DEBUGFUNC("e1000_update_nvm_checksum_spt");
4029
4030         ret_val = e1000_update_nvm_checksum_generic(hw);
4031         if (ret_val)
4032                 goto out;
4033
4034         if (nvm->type != e1000_nvm_flash_sw)
4035                 goto out;
4036
4037         nvm->ops.acquire(hw);
4038
4039         /* We're writing to the opposite bank so if we're on bank 1,
4040          * write to bank 0 etc.  We also need to erase the segment that
4041          * is going to be written
4042          */
4043         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4044         if (ret_val != E1000_SUCCESS) {
4045                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4046                 bank = 0;
4047         }
4048
4049         if (bank == 0) {
4050                 new_bank_offset = nvm->flash_bank_size;
4051                 old_bank_offset = 0;
4052                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4053                 if (ret_val)
4054                         goto release;
4055         } else {
4056                 old_bank_offset = nvm->flash_bank_size;
4057                 new_bank_offset = 0;
4058                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4059                 if (ret_val)
4060                         goto release;
4061         }
4062         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4063                 /* Determine whether to write the value stored
4064                  * in the other NVM bank or a modified value stored
4065                  * in the shadow RAM
4066                  */
4067                 ret_val = e1000_read_flash_dword_ich8lan(hw,
4068                                                          i + old_bank_offset,
4069                                                          &dword);
4070
4071                 if (dev_spec->shadow_ram[i].modified) {
4072                         dword &= 0xffff0000;
4073                         dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4074                 }
4075                 if (dev_spec->shadow_ram[i + 1].modified) {
4076                         dword &= 0x0000ffff;
4077                         dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4078                                   << 16);
4079                 }
4080                 if (ret_val)
4081                         break;
4082
4083                 /* If the word is 0x13, then make sure the signature bits
4084                  * (15:14) are 11b until the commit has completed.
4085                  * This will allow us to write 10b which indicates the
4086                  * signature is valid.  We want to do this after the write
4087                  * has completed so that we don't mark the segment valid
4088                  * while the write is still in progress
4089                  */
4090                 if (i == E1000_ICH_NVM_SIG_WORD - 1)
4091                         dword |= E1000_ICH_NVM_SIG_MASK << 16;
4092
4096                 usec_delay(100);
4097
4098                 /* Write the data to the new bank (offset in words). */
4099                 act_offset = i + new_bank_offset;
4100                 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4101                                                                 dword);
4102                 if (ret_val)
4103                         break;
4104         }
4105
4106         /* Don't bother writing the segment valid bits if sector
4107          * programming failed.
4108          */
4109         if (ret_val) {
4110                 DEBUGOUT("Flash commit failed.\n");
4111                 goto release;
4112         }
4113
4114         /* Finally, validate the new segment by setting bits 15:14
4115          * of word 0x13 to 10b.  This can be done without an erase
4116          * because these bits start out as 11b and only bit 14 has
4117          * to be cleared.
4118          */
4119         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4120
4121         /* offset in words, but we read a dword */
4122         --act_offset;
4123         ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4124
4125         if (ret_val)
4126                 goto release;
4127
4128         dword &= 0xBFFFFFFF;
4129         ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4130
4131         if (ret_val)
4132                 goto release;
4133
4134         /* And invalidate the previously valid segment by setting the
4135          * high byte of its signature word (0x13) to 0.  This can be
4136          * done without an erase because flash erase sets all bits to
4137          * 1's, and bits can be written from 1 to 0 without an erase.
4138          */
4140
4141         /* offset in words, but we read a dword */
4142         act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4143         ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4144
4145         if (ret_val)
4146                 goto release;
4147
4148         dword &= 0x00FFFFFF;
4149         ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4150
4151         if (ret_val)
4152                 goto release;
4153
4154         /* Great!  Everything worked, we can now clear the cached entries. */
4155         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4156                 dev_spec->shadow_ram[i].modified = false;
4157                 dev_spec->shadow_ram[i].value = 0xFFFF;
4158         }
4159
4160 release:
4161         nvm->ops.release(hw);
4162
4163         /* Reload the EEPROM, or else modifications will not appear
4164          * until after the next adapter reset.
4165          */
4166         if (!ret_val) {
4167                 nvm->ops.reload(hw);
4168                 msec_delay(10);
4169         }
4170
4171 out:
4172         if (ret_val)
4173                 DEBUGOUT1("NVM update error: %d\n", ret_val);
4174
4175         return ret_val;
4176 }
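
/* Illustrative worked example, not built: the bank-signature arithmetic used
 * by e1000_update_nvm_checksum_spt() above.  Word 0x12 occupies the low half
 * and the signature word 0x13 the high half of the dword being read/written,
 * so dword bit 30 is bit 14 of word 0x13.
 */
#if 0
STATIC void e1000_example_spt_signature_bits(void)
{
        u32 dword = 0xFFFFFFFF; /* freshly erased flash: all 1's, so word
                                 * 0x13 bits 15:14 read as 11b
                                 */

        dword &= 0xBFFFFFFF;    /* clear dword bit 30 (word 0x13 bit 14):
                                 * 11b -> 10b, the new bank becomes valid
                                 */

        dword &= 0x00FFFFFF;    /* clear the high byte of word 0x13:
                                 * 15:14 -> 00b, the old bank is invalidated
                                 */
        (void)dword;
}
#endif /* illustrative sketch */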
4177
4178 /**
4179  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4180  *  @hw: pointer to the HW structure
4181  *
4182  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4183  *  which writes the checksum to the shadow ram.  The changes in the shadow
4184  *  ram are then committed to the EEPROM by processing each bank at a time
4185  *  checking for the modified bit and writing only the pending changes.
4186  *  After a successful commit, the shadow ram is cleared and is ready for
4187  *  future writes.
4188  **/
4189 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4190 {
4191         struct e1000_nvm_info *nvm = &hw->nvm;
4192         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4193         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4194         s32 ret_val;
4195         u16 data = 0;
4196
4197         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4198
4199         ret_val = e1000_update_nvm_checksum_generic(hw);
4200         if (ret_val)
4201                 goto out;
4202
4203         if (nvm->type != e1000_nvm_flash_sw)
4204                 goto out;
4205
4206         nvm->ops.acquire(hw);
4207
4208         /* We're writing to the opposite bank so if we're on bank 1,
4209          * write to bank 0 etc.  We also need to erase the segment that
4210          * is going to be written
4211          */
4212         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4213         if (ret_val != E1000_SUCCESS) {
4214                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4215                 bank = 0;
4216         }
4217
4218         if (bank == 0) {
4219                 new_bank_offset = nvm->flash_bank_size;
4220                 old_bank_offset = 0;
4221                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4222                 if (ret_val)
4223                         goto release;
4224         } else {
4225                 old_bank_offset = nvm->flash_bank_size;
4226                 new_bank_offset = 0;
4227                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4228                 if (ret_val)
4229                         goto release;
4230         }
4231         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4232                 if (dev_spec->shadow_ram[i].modified) {
4233                         data = dev_spec->shadow_ram[i].value;
4234                 } else {
4235                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
4236                                                                 old_bank_offset,
4237                                                                 &data);
4238                         if (ret_val)
4239                                 break;
4240                 }
4241                 /* If the word is 0x13, then make sure the signature bits
4242                  * (15:14) are 11b until the commit has completed.
4243                  * This will allow us to write 10b which indicates the
4244                  * signature is valid.  We want to do this after the write
4245                  * has completed so that we don't mark the segment valid
4246                  * while the write is still in progress
4247                  */
4248                 if (i == E1000_ICH_NVM_SIG_WORD)
4249                         data |= E1000_ICH_NVM_SIG_MASK;
4250
4251                 /* Convert offset to bytes. */
4252                 act_offset = (i + new_bank_offset) << 1;
4253
4254                 usec_delay(100);
4255
4256                 /* Write the bytes to the new bank. */
4257                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4258                                                                act_offset,
4259                                                                (u8)data);
4260                 if (ret_val)
4261                         break;
4262
4263                 usec_delay(100);
4264                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4265                                                           act_offset + 1,
4266                                                           (u8)(data >> 8));
4267                 if (ret_val)
4268                         break;
4269         }
4270
4271         /* Don't bother writing the segment valid bits if sector
4272          * programming failed.
4273          */
4274         if (ret_val) {
4275                 DEBUGOUT("Flash commit failed.\n");
4276                 goto release;
4277         }
4278
4279         /* Finally, validate the new segment by setting bits 15:14
4280          * of word 0x13 to 10b.  This can be done without an erase
4281          * because these bits start out as 11b and only bit 14 has
4282          * to be cleared.
4283          */
4284         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4285         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4286         if (ret_val)
4287                 goto release;
4288
4289         data &= 0xBFFF;
4290         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4291                                                        (u8)(data >> 8));
4292         if (ret_val)
4293                 goto release;
4294
4295         /* And invalidate the previously valid segment by setting the
4296          * high byte of its signature word (0x13) to 0.  This can be
4297          * done without an erase because flash erase sets all bits to
4298          * 1's, and bits can be written from 1 to 0 without an erase.
4299          */
4300         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4301
4302         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4303
4304         if (ret_val)
4305                 goto release;
4306
4307         /* Great!  Everything worked, we can now clear the cached entries. */
4308         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4309                 dev_spec->shadow_ram[i].modified = false;
4310                 dev_spec->shadow_ram[i].value = 0xFFFF;
4311         }
4312
4313 release:
4314         nvm->ops.release(hw);
4315
4316         /* Reload the EEPROM, or else modifications will not appear
4317          * until after the next adapter reset.
4318          */
4319         if (!ret_val) {
4320                 nvm->ops.reload(hw);
4321                 msec_delay(10);
4322         }
4323
4324 out:
4325         if (ret_val)
4326                 DEBUGOUT1("NVM update error: %d\n", ret_val);
4327
4328         return ret_val;
4329 }
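
/* Illustrative sketch, not built: the byte addressing used by
 * e1000_update_nvm_checksum_ich8lan() above.  The signature lives in bits
 * 15:14 of word 0x13, i.e. in that word's high byte, so only the odd byte at
 * (word offset * 2 + 1) has to be reprogrammed to validate or invalidate a
 * bank.  The helper name is hypothetical.
 */
#if 0
STATIC u32 e1000_example_sig_high_byte_offset(u32 bank_offset_words)
{
        u32 sig_word_offset = bank_offset_words + E1000_ICH_NVM_SIG_WORD;

        /* data &= 0xBFFF clears bit 14 (11b -> 10b); writing (data >> 8)
         * back at this byte offset commits that change, and writing 0 here
         * for the old bank clears bits 15:14 entirely.
         */
        return sig_word_offset * 2 + 1;
}
#endif /* illustrative sketch */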
4330
4331 /**
4332  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4333  *  @hw: pointer to the HW structure
4334  *
4335  *  Check whether the checksum needs fixing by reading bit 6 in word 0x19
4336  *  (NVM_COMPAT on PCH LPT/SPT).  If the bit is 0, the EEPROM was modified but
4337  *  the checksum was not calculated; in that case calculate it and set bit 6.
4338  **/
4339 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4340 {
4341         s32 ret_val;
4342         u16 data;
4343         u16 word;
4344         u16 valid_csum_mask;
4345
4346         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4347
4348         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4349          * the checksum needs to be fixed.  This bit is an indication that
4350          * the NVM was prepared by OEM software and did not calculate
4351          * the checksum...a likely scenario.
4352          */
4353         switch (hw->mac.type) {
4354         case e1000_pch_lpt:
4355         case e1000_pch_spt:
4356                 word = NVM_COMPAT;
4357                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4358                 break;
4359         default:
4360                 word = NVM_FUTURE_INIT_WORD1;
4361                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4362                 break;
4363         }
4364
4365         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4366         if (ret_val)
4367                 return ret_val;
4368
4369         if (!(data & valid_csum_mask)) {
4370                 data |= valid_csum_mask;
4371                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4372                 if (ret_val)
4373                         return ret_val;
4374                 ret_val = hw->nvm.ops.update(hw);
4375                 if (ret_val)
4376                         return ret_val;
4377         }
4378
4379         return e1000_validate_nvm_checksum_generic(hw);
4380 }
4381
4382 /**
4383  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4384  *  @hw: pointer to the HW structure
4385  *  @offset: The offset (in bytes) of the byte/word to write.
4386  *  @size: Size of data to write, 1=byte 2=word (must be 4 on PCH SPT and newer)
4387  *  @data: The byte(s) to write to the NVM.
4388  *
4389  *  Writes one/two bytes (or a dword on PCH SPT+) to the NVM using the flash access registers.
4390  **/
4391 STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4392                                           u8 size, u16 data)
4393 {
4394         union ich8_hws_flash_status hsfsts;
4395         union ich8_hws_flash_ctrl hsflctl;
4396         u32 flash_linear_addr;
4397         u32 flash_data = 0;
4398         s32 ret_val;
4399         u8 count = 0;
4400
4401         DEBUGFUNC("e1000_write_ich8_data");
4402
4403         if (hw->mac.type >= e1000_pch_spt) {
4404                 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4405                         return -E1000_ERR_NVM;
4406         } else {
4407                 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4408                         return -E1000_ERR_NVM;
4409         }
4410
4411         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4412                              hw->nvm.flash_base_addr);
4413
4414         do {
4415                 usec_delay(1);
4416                 /* Steps */
4417                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4418                 if (ret_val != E1000_SUCCESS)
4419                         break;
4420                 /* In SPT, this register is in LAN memory space, not
4421                  * flash.  Therefore, only 32-bit access is supported.
4422                  */
4423                 if (hw->mac.type >= e1000_pch_spt)
4424                         hsflctl.regval =
4425                             E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4426                 else
4427                         hsflctl.regval =
4428                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4429
4430                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4431                 hsflctl.hsf_ctrl.fldbcount = size - 1;
4432                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4433                 /* In SPT, this register is in LAN memory space,
4434                  * not flash.  Therefore, only 32-bit access is
4435                  * supported.
4436                  */
4437                 if (hw->mac.type >= e1000_pch_spt)
4438                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4439                                               hsflctl.regval << 16);
4440                 else
4441                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4442                                                 hsflctl.regval);
4443
4444                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4445
4446                 if (size == 1)
4447                         flash_data = (u32)data & 0x00FF;
4448                 else
4449                         flash_data = (u32)data;
4450
4451                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4452
4453                 /* Check if FCERR is set to 1; if so, clear it and retry
4454                  * the whole sequence a few more times, else we are done.
4455                  */
4456                 ret_val =
4457                     e1000_flash_cycle_ich8lan(hw,
4458                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4459                 if (ret_val == E1000_SUCCESS)
4460                         break;
4461
4462                 /* If we're here, then things are most likely
4463                  * completely hosed, but if the error condition
4464                  * is detected, it won't hurt to give it another
4465                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4466                  */
4467                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4468                 if (hsfsts.hsf_status.flcerr)
4469                         /* Repeat for some time before giving up. */
4470                         continue;
4471                 if (!hsfsts.hsf_status.flcdone) {
4472                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4473                         break;
4474                 }
4475         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4476
4477         return ret_val;
4478 }
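
/* Illustrative sketch, not built: writing a single 16-bit word through
 * e1000_write_flash_data_ich8lan() above on a pre-SPT part.  The byte offset
 * and value are arbitrary; note that the FLDBCOUNT field programmed inside
 * the helper is always "size - 1" (0 for a byte, 1 for a word), while PCH SPT
 * and newer only accept full dword (size == 4) writes.
 */
#if 0
STATIC s32 e1000_example_write_nvm_word(struct e1000_hw *hw, u32 byte_offset,
                                        u16 value)
{
        if (hw->mac.type >= e1000_pch_spt)
                /* SPT+ parts must use the dword write path instead. */
                return -E1000_ERR_NVM;

        return e1000_write_flash_data_ich8lan(hw, byte_offset, 2, value);
}
#endif /* illustrative sketch */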
4479
4480 /**
4481 *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4482 *  @hw: pointer to the HW structure
4483 *  @offset: The offset (in bytes) of the dword to write.
4484 *  @data: The 4 bytes to write to the NVM.
4485 *
4486 *  Writes a dword (4 bytes) to the NVM using the flash access registers.
4487 **/
4488 STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4489                                             u32 data)
4490 {
4491         union ich8_hws_flash_status hsfsts;
4492         union ich8_hws_flash_ctrl hsflctl;
4493         u32 flash_linear_addr;
4494         s32 ret_val;
4495         u8 count = 0;
4496
4497         DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4498
4499         if (hw->mac.type >= e1000_pch_spt) {
4500                 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4501                         return -E1000_ERR_NVM;
4502         }
4503         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4504                              hw->nvm.flash_base_addr);
4505         do {
4506                 usec_delay(1);
4507                 /* Steps */
4508                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4509                 if (ret_val != E1000_SUCCESS)
4510                         break;
4511
4512                 /* In SPT, this register is in LAN memory space, not
4513                  * flash.  Therefore, only 32-bit access is supported.
4514                  */
4515                 if (hw->mac.type >= e1000_pch_spt)
4516                         hsflctl.regval = E1000_READ_FLASH_REG(hw,
4517                                                               ICH_FLASH_HSFSTS)
4518                                          >> 16;
4519                 else
4520                         hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4521                                                               ICH_FLASH_HSFCTL);
4522
4523                 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4524                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4525
4526                 /* In SPT, this register is in LAN memory space,
4527                  * not flash.  Therefore, only 32-bit access is
4528                  * supported.
4529                  */
4530                 if (hw->mac.type >= e1000_pch_spt)
4531                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4532                                               hsflctl.regval << 16);
4533                 else
4534                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4535                                                 hsflctl.regval);
4536
4537                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4538
4539                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4540
4541                 /* Check if FCERR is set to 1; if so, clear it and retry
4542                  * the whole sequence a few more times, else we are done.
4543                  */
4544                 ret_val = e1000_flash_cycle_ich8lan(hw,
4545                                                ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4546
4547                 if (ret_val == E1000_SUCCESS)
4548                         break;
4549
4550                 /* If we're here, then things are most likely
4551                  * completely hosed, but if the error condition
4552                  * is detected, it won't hurt to give it another
4553                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4554                  */
4555                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4556
4557                 if (hsfsts.hsf_status.flcerr)
4558                         /* Repeat for some time before giving up. */
4559                         continue;
4560                 if (!hsfsts.hsf_status.flcdone) {
4561                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4562                         break;
4563                 }
4564         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4565
4566         return ret_val;
4567 }
4568
4569 /**
4570  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4571  *  @hw: pointer to the HW structure
4572  *  @offset: The index of the byte to write.
4573  *  @data: The byte to write to the NVM.
4574  *
4575  *  Writes a single byte to the NVM using the flash access registers.
4576  **/
4577 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4578                                           u8 data)
4579 {
4580         u16 word = (u16)data;
4581
4582         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4583
4584         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4585 }
4586
4587 /**
4588 *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4589 *  @hw: pointer to the HW structure
4590 *  @offset: The offset (in words) of the dword to write.
4591 *  @dword: The dword to write to the NVM.
4592 *
4593 *  Writes a single dword to the NVM using the flash access registers.
4594 *  Goes through a retry algorithm before giving up.
4595 **/
4596 STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4597                                                  u32 offset, u32 dword)
4598 {
4599         s32 ret_val;
4600         u16 program_retries;
4601
4602         DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4603
4604         /* Must convert word offset into bytes. */
4605         offset <<= 1;
4606
4607         ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4608
4609         if (!ret_val)
4610                 return ret_val;
4611         for (program_retries = 0; program_retries < 100; program_retries++) {
4612                 DEBUGOUT2("Retrying Dword %8.8X at offset %u\n", dword, offset);
4613                 usec_delay(100);
4614                 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4615                 if (ret_val == E1000_SUCCESS)
4616                         break;
4617         }
4618         if (program_retries == 100)
4619                 return -E1000_ERR_NVM;
4620
4621         return E1000_SUCCESS;
4622 }
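
/* Illustrative sketch, not built: unlike e1000_write_flash_data32_ich8lan(),
 * which takes a byte offset, the retry wrapper above takes a word offset and
 * converts it itself (offset <<= 1).  The helper below only restates that
 * relationship; its name and parameters are hypothetical.
 */
#if 0
STATIC s32 e1000_example_commit_dword(struct e1000_hw *hw, u32 word_offset,
                                      u32 dword)
{
        /* Pass the word offset straight through; the wrapper doubles it into
         * the byte offset expected by the low-level dword write.
         */
        return e1000_retry_write_flash_dword_ich8lan(hw, word_offset, dword);
}
#endif /* illustrative sketch */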
4623
4624 /**
4625  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4626  *  @hw: pointer to the HW structure
4627  *  @offset: The offset of the byte to write.
4628  *  @byte: The byte to write to the NVM.
4629  *
4630  *  Writes a single byte to the NVM using the flash access registers.
4631  *  Goes through a retry algorithm before giving up.
4632  **/
4633 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4634                                                 u32 offset, u8 byte)
4635 {
4636         s32 ret_val;
4637         u16 program_retries;
4638
4639         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4640
4641         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4642         if (!ret_val)
4643                 return ret_val;
4644
4645         for (program_retries = 0; program_retries < 100; program_retries++) {
4646                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4647                 usec_delay(100);
4648                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4649                 if (ret_val == E1000_SUCCESS)
4650                         break;
4651         }
4652         if (program_retries == 100)
4653                 return -E1000_ERR_NVM;
4654
4655         return E1000_SUCCESS;
4656 }
4657
4658 /**
4659  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4660  *  @hw: pointer to the HW structure
4661  *  @bank: 0 for first bank, 1 for second bank, etc.
4662  *
4663  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4664  *  bank N is 4096 * N + flash_reg_addr.
4665  **/
4666 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4667 {
4668         struct e1000_nvm_info *nvm = &hw->nvm;
4669         union ich8_hws_flash_status hsfsts;
4670         union ich8_hws_flash_ctrl hsflctl;
4671         u32 flash_linear_addr;
4672         /* bank size is in 16bit words - adjust to bytes */
4673         u32 flash_bank_size = nvm->flash_bank_size * 2;
4674         s32 ret_val;
4675         s32 count = 0;
4676         s32 j, iteration, sector_size;
4677
4678         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4679
4680         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4681
4682         /* Determine HW Sector size: Read BERASE bits of hw flash status
4683          * register
4684          * 00: The Hw sector is 256 bytes, hence we need to erase 16
4685          *     consecutive sectors.  The start index for the nth Hw sector
4686          *     can be calculated as = bank * 4096 + n * 256
4687          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4688          *     The start index for the nth Hw sector can be calculated
4689          *     as = bank * 4096
4690          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4691          *     (ich9 only, otherwise error condition)
4692          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4693          */
4694         switch (hsfsts.hsf_status.berasesz) {
4695         case 0:
4696                 /* Hw sector size 256 */
4697                 sector_size = ICH_FLASH_SEG_SIZE_256;
4698                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4699                 break;
4700         case 1:
4701                 sector_size = ICH_FLASH_SEG_SIZE_4K;
4702                 iteration = 1;
4703                 break;
4704         case 2:
4705                 sector_size = ICH_FLASH_SEG_SIZE_8K;
4706                 iteration = 1;
4707                 break;
4708         case 3:
4709                 sector_size = ICH_FLASH_SEG_SIZE_64K;
4710                 iteration = 1;
4711                 break;
4712         default:
4713                 return -E1000_ERR_NVM;
4714         }
4715
4716         /* Start with the base address, then add the sector offset. */
4717         flash_linear_addr = hw->nvm.flash_base_addr;
4718         flash_linear_addr += (bank) ? flash_bank_size : 0;
4719
4720         for (j = 0; j < iteration; j++) {
4721                 do {
4722                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4723
4724                         /* Steps */
4725                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
4726                         if (ret_val)
4727                                 return ret_val;
4728
4729                         /* Write a value 11 (block Erase) in Flash
4730                          * Cycle field in hw flash control
4731                          */
4732                         if (hw->mac.type >= e1000_pch_spt)
4733                                 hsflctl.regval =
4734                                     E1000_READ_FLASH_REG(hw,
4735                                                          ICH_FLASH_HSFSTS)>>16;
4736                         else
4737                                 hsflctl.regval =
4738                                     E1000_READ_FLASH_REG16(hw,
4739                                                            ICH_FLASH_HSFCTL);
4740
4741                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4742                         if (hw->mac.type >= e1000_pch_spt)
4743                                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4744                                                       hsflctl.regval << 16);
4745                         else
4746                                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4747                                                         hsflctl.regval);
4748
4749                         /* Write the last 24 bits of an index within the
4750                          * block into Flash Linear address field in Flash
4751                          * Address.
4752                          */
4753                         flash_linear_addr += (j * sector_size);
4754                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4755                                               flash_linear_addr);
4756
4757                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4758                         if (ret_val == E1000_SUCCESS)
4759                                 break;
4760
4761                         /* Check if FCERR is set to 1.  If 1,
4762                          * clear it and try the whole sequence
4763                          * a few more times else Done
4764                          */
4765                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4766                                                       ICH_FLASH_HSFSTS);
4767                         if (hsfsts.hsf_status.flcerr)
4768                                 /* repeat for some time before giving up */
4769                                 continue;
4770                         else if (!hsfsts.hsf_status.flcdone)
4771                                 return ret_val;
4772                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4773         }
4774
4775         return E1000_SUCCESS;
4776 }
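
/* Illustrative worked example, not built: the geometry decoded by
 * e1000_erase_flash_bank_ich8lan() above, assuming the 4 KB bank described in
 * the BERASE comment inside the function (a 2K-word flash_bank_size).
 */
#if 0
STATIC void e1000_example_erase_geometry(void)
{
        u32 flash_bank_size = 0x800 * 2;        /* 2K words -> 4096 bytes */
        u32 iteration;

        /* BERASE == 00b: 256-byte sectors, so erasing the bank takes
         * 4096 / 256 = 16 erase cycles.
         */
        iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;

        /* BERASE == 01b: 4 KB sectors, so a single cycle erases the bank.
         * Bank 1 starts flash_bank_size bytes past hw->nvm.flash_base_addr.
         */
        iteration = 1;
        (void)iteration;
}
#endif /* illustrative sketch */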
4777
4778 /**
4779  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4780  *  @hw: pointer to the HW structure
4781  *  @data: Pointer to the LED settings
4782  *
4783  *  Reads the LED default settings from the NVM into *data.  If the NVM LED
4784  *  setting is all 0's or all F's, replace it with a valid default LED
4785  *  setting.
4786  **/
4787 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4788 {
4789         s32 ret_val;
4790
4791         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4792
4793         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4794         if (ret_val) {
4795                 DEBUGOUT("NVM Read Error\n");
4796                 return ret_val;
4797         }
4798
4799         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4800                 *data = ID_LED_DEFAULT_ICH8LAN;
4801
4802         return E1000_SUCCESS;
4803 }
4804
4805 /**
4806  *  e1000_id_led_init_pchlan - store LED configurations
4807  *  @hw: pointer to the HW structure
4808  *
4809  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4810  *  the PHY LED configuration register.
4811  *
4812  *  PCH also does not have an "always on" or "always off" mode which
4813  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4814  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4815  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4816  *  link based on logic in e1000_led_[on|off]_pchlan().
4817  **/
4818 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4819 {
4820         struct e1000_mac_info *mac = &hw->mac;
4821         s32 ret_val;
4822         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4823         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4824         u16 data, i, temp, shift;
4825
4826         DEBUGFUNC("e1000_id_led_init_pchlan");
4827
4828         /* Get default ID LED modes */
4829         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4830         if (ret_val)
4831                 return ret_val;
4832
4833         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4834         mac->ledctl_mode1 = mac->ledctl_default;
4835         mac->ledctl_mode2 = mac->ledctl_default;
4836
4837         for (i = 0; i < 4; i++) {
4838                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4839                 shift = (i * 5);
4840                 switch (temp) {
4841                 case ID_LED_ON1_DEF2:
4842                 case ID_LED_ON1_ON2:
4843                 case ID_LED_ON1_OFF2:
4844                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4845                         mac->ledctl_mode1 |= (ledctl_on << shift);
4846                         break;
4847                 case ID_LED_OFF1_DEF2:
4848                 case ID_LED_OFF1_ON2:
4849                 case ID_LED_OFF1_OFF2:
4850                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4851                         mac->ledctl_mode1 |= (ledctl_off << shift);
4852                         break;
4853                 default:
4854                         /* Do nothing */
4855                         break;
4856                 }
4857                 switch (temp) {
4858                 case ID_LED_DEF1_ON2:
4859                 case ID_LED_ON1_ON2:
4860                 case ID_LED_OFF1_ON2:
4861                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4862                         mac->ledctl_mode2 |= (ledctl_on << shift);
4863                         break;
4864                 case ID_LED_DEF1_OFF2:
4865                 case ID_LED_ON1_OFF2:
4866                 case ID_LED_OFF1_OFF2:
4867                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4868                         mac->ledctl_mode2 |= (ledctl_off << shift);
4869                         break;
4870                 default:
4871                         /* Do nothing */
4872                         break;
4873                 }
4874         }
4875
4876         return E1000_SUCCESS;
4877 }
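
/* Illustrative worked example, not built: the field arithmetic performed by
 * e1000_id_led_init_pchlan() above.  The NVM ID-LED word is scanned with a
 * 4-bit stride per LED, while the PHY LED configuration is laid out with a
 * 5-bit stride per LED, hence the two different shifts.  The LED index used
 * here is just an example.
 */
#if 0
STATIC void e1000_example_pch_led_decode(u16 data)
{
        u16 i = 2;      /* LED2, as an example */
        /* Mode field for LED i starts at bit 4*i (here bit 8). */
        u16 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
        /* The matching PHY LED field is placed with a 5-bit stride, here
         * starting at bit 10.
         */
        u16 shift = i * 5;

        (void)temp;
        (void)shift;
}
#endif /* illustrative sketch */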
4878
4879 /**
4880  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4881  *  @hw: pointer to the HW structure
4882  *
4883  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4884  *  register, so the bus width is hard coded.
4885  **/
4886 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4887 {
4888         struct e1000_bus_info *bus = &hw->bus;
4889         s32 ret_val;
4890
4891         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4892
4893         ret_val = e1000_get_bus_info_pcie_generic(hw);
4894
4895         /* ICH devices are "PCI Express"-ish.  They have
4896          * a configuration space, but do not contain
4897          * PCI Express Capability registers, so bus width
4898          * must be hardcoded.
4899          */
4900         if (bus->width == e1000_bus_width_unknown)
4901                 bus->width = e1000_bus_width_pcie_x1;
4902
4903         return ret_val;
4904 }
4905
4906 /**
4907  *  e1000_reset_hw_ich8lan - Reset the hardware
4908  *  @hw: pointer to the HW structure
4909  *
4910  *  Does a full reset of the hardware which includes a reset of the PHY and
4911  *  MAC.
4912  **/
4913 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4914 {
4915         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4916         u16 kum_cfg;
4917         u32 ctrl, reg;
4918         s32 ret_val;
4919
4920         DEBUGFUNC("e1000_reset_hw_ich8lan");
4921
4922         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4923          * on the last TLP read/write transaction when MAC is reset.
4924          */
4925         ret_val = e1000_disable_pcie_master_generic(hw);
4926         if (ret_val)
4927                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4928
4929         DEBUGOUT("Masking off all interrupts\n");
4930         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4931
4932         /* Disable the Transmit and Receive units.  Then delay to allow
4933          * any pending transactions to complete before we hit the MAC
4934          * with the global reset.
4935          */
4936         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4937         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4938         E1000_WRITE_FLUSH(hw);
4939
4940         msec_delay(10);
4941
4942         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4943         if (hw->mac.type == e1000_ich8lan) {
4944                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4945                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4946                 /* Set Packet Buffer Size to 16k. */
4947                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4948         }
4949
4950         if (hw->mac.type == e1000_pchlan) {
4951                 /* Save the NVM K1 bit setting */
4952                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4953                 if (ret_val)
4954                         return ret_val;
4955
4956                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4957                         dev_spec->nvm_k1_enabled = true;
4958                 else
4959                         dev_spec->nvm_k1_enabled = false;
4960         }
4961
4962         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4963
4964         if (!hw->phy.ops.check_reset_block(hw)) {
4965                 /* Full-chip reset requires MAC and PHY reset at the same
4966                  * time to make sure the interface between MAC and the
4967                  * external PHY is reset.
4968                  */
4969                 ctrl |= E1000_CTRL_PHY_RST;
4970
4971                 /* Gate automatic PHY configuration by hardware on
4972                  * non-managed 82579
4973                  */
4974                 if ((hw->mac.type == e1000_pch2lan) &&
4975                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4976                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4977         }
4978         ret_val = e1000_acquire_swflag_ich8lan(hw);
4979         DEBUGOUT("Issuing a global reset to ich8lan\n");
4980         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4981         /* cannot issue a flush here because it hangs the hardware */
4982         msec_delay(20);
4983
4984         /* Set Phy Config Counter to 50msec */
4985         if (hw->mac.type == e1000_pch2lan) {
4986                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4987                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4988                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4989                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4990         }
4991
4992         if (!ret_val)
4993                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4994
4995         if (ctrl & E1000_CTRL_PHY_RST) {
4996                 ret_val = hw->phy.ops.get_cfg_done(hw);
4997                 if (ret_val)
4998                         return ret_val;
4999
5000                 ret_val = e1000_post_phy_reset_ich8lan(hw);
5001                 if (ret_val)
5002                         return ret_val;
5003         }
5004
5005         /* For PCH, this write will make sure that any noise
5006          * will be detected as a CRC error and be dropped rather than show up
5007          * as a bad packet to the DMA engine.
5008          */
5009         if (hw->mac.type == e1000_pchlan)
5010                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
5011
5012         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5013         E1000_READ_REG(hw, E1000_ICR);
5014
5015         reg = E1000_READ_REG(hw, E1000_KABGTXD);
5016         reg |= E1000_KABGTXD_BGSQLBIAS;
5017         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5018
5019         return E1000_SUCCESS;
5020 }
5021
5022 /**
5023  *  e1000_init_hw_ich8lan - Initialize the hardware
5024  *  @hw: pointer to the HW structure
5025  *
5026  *  Prepares the hardware for transmit and receive by doing the following:
5027  *   - initialize hardware bits
5028  *   - initialize LED identification
5029  *   - setup receive address registers
5030  *   - setup flow control
5031  *   - setup transmit descriptors
5032  *   - clear statistics
5033  **/
5034 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5035 {
5036         struct e1000_mac_info *mac = &hw->mac;
5037         u32 ctrl_ext, txdctl, snoop;
5038         s32 ret_val;
5039         u16 i;
5040
5041         DEBUGFUNC("e1000_init_hw_ich8lan");
5042
5043         e1000_initialize_hw_bits_ich8lan(hw);
5044
5045         /* Initialize identification LED */
5046         ret_val = mac->ops.id_led_init(hw);
5047         /* An error is not fatal and we should not stop init due to this */
5048         if (ret_val)
5049                 DEBUGOUT("Error initializing identification LED\n");
5050
5051         /* Setup the receive address. */
5052         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5053
5054         /* Zero out the Multicast HASH table */
5055         DEBUGOUT("Zeroing the MTA\n");
5056         for (i = 0; i < mac->mta_reg_count; i++)
5057                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5058
5059         /* The 82578 Rx buffer will stall if wakeup is enabled in both the
5060          * host and the ME.  Disable wakeup by clearing the host wakeup bit.
5061          * Reset the phy after disabling host wakeup to reset the Rx buffer.
5062          */
5063         if (hw->phy.type == e1000_phy_82578) {
5064                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5065                 i &= ~BM_WUC_HOST_WU_BIT;
5066                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5067                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
5068                 if (ret_val)
5069                         return ret_val;
5070         }
5071
5072         /* Setup link and flow control */
5073         ret_val = mac->ops.setup_link(hw);
5074
5075         /* Set the transmit descriptor write-back policy for both queues */
5076         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5077         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5078                   E1000_TXDCTL_FULL_TX_DESC_WB);
5079         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5080                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5081         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5082         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5083         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5084                   E1000_TXDCTL_FULL_TX_DESC_WB);
5085         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5086                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5087         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5088
5089         /* ICH8 has opposite polarity of no_snoop bits.
5090          * By default, we should use snoop behavior.
5091          */
5092         if (mac->type == e1000_ich8lan)
5093                 snoop = PCIE_ICH8_SNOOP_ALL;
5094         else
5095                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5096         e1000_set_pcie_no_snoop_generic(hw, snoop);
5097
5098         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5099         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5100         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5101
5102         /* Clear all of the statistics registers (clear on read).  It is
5103          * important that we do this after we have tried to establish link
5104          * because the symbol error count will increment wildly if there
5105          * is no link.
5106          */
5107         e1000_clear_hw_cntrs_ich8lan(hw);
5108
5109         return ret_val;
5110 }
5111
5112 /**
5113  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5114  *  @hw: pointer to the HW structure
5115  *
5116  *  Sets/Clears required hardware bits necessary for correctly setting up the
5117  *  hardware for transmit and receive.
5118  **/
5119 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5120 {
5121         u32 reg;
5122
5123         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5124
5125         /* Extended Device Control */
5126         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5127         reg |= (1 << 22);
5128         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
5129         if (hw->mac.type >= e1000_pchlan)
5130                 reg |= E1000_CTRL_EXT_PHYPDEN;
5131         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5132
5133         /* Transmit Descriptor Control 0 */
5134         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5135         reg |= (1 << 22);
5136         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5137
5138         /* Transmit Descriptor Control 1 */
5139         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5140         reg |= (1 << 22);
5141         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5142
5143         /* Transmit Arbitration Control 0 */
5144         reg = E1000_READ_REG(hw, E1000_TARC(0));
5145         if (hw->mac.type == e1000_ich8lan)
5146                 reg |= (1 << 28) | (1 << 29);
5147         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5148         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5149
5150         /* Transmit Arbitration Control 1 */
5151         reg = E1000_READ_REG(hw, E1000_TARC(1));
5152         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5153                 reg &= ~(1 << 28);
5154         else
5155                 reg |= (1 << 28);
5156         reg |= (1 << 24) | (1 << 26) | (1 << 30);
5157         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5158
5159         /* Device Status */
5160         if (hw->mac.type == e1000_ich8lan) {
5161                 reg = E1000_READ_REG(hw, E1000_STATUS);
5162                 reg &= ~(1 << 31);
5163                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5164         }
5165
5166         /* Work around a descriptor data corruption issue with NFS v2 UDP
5167          * traffic by simply disabling the NFS filtering capability.
5168          */
5169         reg = E1000_READ_REG(hw, E1000_RFCTL);
5170         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5171
5172         /* Disable IPv6 extension header parsing because some malformed
5173          * IPv6 headers can hang the Rx.
5174          */
5175         if (hw->mac.type == e1000_ich8lan)
5176                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5177         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5178
5179         /* Enable ECC on Lynxpoint */
5180         if (hw->mac.type >= e1000_pch_lpt) {
5181                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5182                 reg |= E1000_PBECCSTS_ECC_ENABLE;
5183                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5184
5185                 reg = E1000_READ_REG(hw, E1000_CTRL);
5186                 reg |= E1000_CTRL_MEHE;
5187                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
5188         }
5189
5190         return;
5191 }
5192
5193 /**
5194  *  e1000_setup_link_ich8lan - Setup flow control and link settings
5195  *  @hw: pointer to the HW structure
5196  *
5197  *  Determines which flow control settings to use, then configures flow
5198  *  control.  Calls the appropriate media-specific link configuration
5199  *  function.  Assuming the adapter has a valid link partner, a valid link
5200  *  should be established.  Assumes the hardware has previously been reset
5201  *  and the transmitter and receiver are not enabled.
5202  **/
5203 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5204 {
5205         s32 ret_val;
5206
5207         DEBUGFUNC("e1000_setup_link_ich8lan");
5208
5209         if (hw->phy.ops.check_reset_block(hw))
5210                 return E1000_SUCCESS;
5211
5212         /* ICH parts do not have a word in the NVM to determine
5213          * the default flow control setting, so we explicitly
5214          * set it to full.
5215          */
5216         if (hw->fc.requested_mode == e1000_fc_default)
5217                 hw->fc.requested_mode = e1000_fc_full;
5218
5219         /* Save off the requested flow control mode for use later.  Depending
5220          * on the link partner's capabilities, we may or may not use this mode.
5221          */
5222         hw->fc.current_mode = hw->fc.requested_mode;
5223
5224         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5225                 hw->fc.current_mode);
5226
5227         /* Continue to configure the copper link. */
5228         ret_val = hw->mac.ops.setup_physical_interface(hw);
5229         if (ret_val)
5230                 return ret_val;
5231
5232         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5233         if ((hw->phy.type == e1000_phy_82578) ||
5234             (hw->phy.type == e1000_phy_82579) ||
5235             (hw->phy.type == e1000_phy_i217) ||
5236             (hw->phy.type == e1000_phy_82577)) {
5237                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5238
5239                 ret_val = hw->phy.ops.write_reg(hw,
5240                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
5241                                              hw->fc.pause_time);
5242                 if (ret_val)
5243                         return ret_val;
5244         }
5245
5246         return e1000_set_fc_watermarks_generic(hw);
5247 }
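
/* Illustrative sketch, not built: a caller may request a specific flow
 * control mode before link setup runs; e1000_setup_link_ich8lan() above only
 * replaces e1000_fc_default with e1000_fc_full and keeps any other request.
 * It assumes mac.ops.setup_link is wired to the routine above, as this
 * driver's ops tables do for ICH/PCH parts; the choice of e1000_fc_rx_pause
 * is only an example.
 */
#if 0
STATIC s32 e1000_example_request_rx_pause(struct e1000_hw *hw)
{
        hw->fc.requested_mode = e1000_fc_rx_pause;

        /* Copies requested_mode into current_mode, configures the copper
         * link and programs the pause time and flow control watermarks.
         */
        return hw->mac.ops.setup_link(hw);
}
#endif /* illustrative sketch */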
5248
5249 /**
5250  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5251  *  @hw: pointer to the HW structure
5252  *
5253  *  Configures the kumeran interface to the PHY to wait the appropriate time
5254  *  when polling the PHY, then call the generic setup_copper_link to finish
5255  *  configuring the copper link.
5256  **/
5257 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5258 {
5259         u32 ctrl;
5260         s32 ret_val;
5261         u16 reg_data;
5262
5263         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5264
5265         ctrl = E1000_READ_REG(hw, E1000_CTRL);
5266         ctrl |= E1000_CTRL_SLU;
5267         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5268         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5269
5270         /* Set the mac to wait the maximum time between each iteration
5271          * and increase the max iterations when polling the phy;
5272          * this fixes erroneous timeouts at 10Mbps.
5273          */
5274         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5275                                                0xFFFF);
5276         if (ret_val)
5277                 return ret_val;
5278         ret_val = e1000_read_kmrn_reg_generic(hw,
5279                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
5280                                               &reg_data);
5281         if (ret_val)
5282                 return ret_val;
5283         reg_data |= 0x3F;
5284         ret_val = e1000_write_kmrn_reg_generic(hw,
5285                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
5286                                                reg_data);
5287         if (ret_val)
5288                 return ret_val;
5289
5290         switch (hw->phy.type) {
5291         case e1000_phy_igp_3:
5292                 ret_val = e1000_copper_link_setup_igp(hw);
5293                 if (ret_val)
5294                         return ret_val;
5295                 break;
5296         case e1000_phy_bm:
5297         case e1000_phy_82578:
5298                 ret_val = e1000_copper_link_setup_m88(hw);
5299                 if (ret_val)
5300                         return ret_val;
5301                 break;
5302         case e1000_phy_82577:
5303         case e1000_phy_82579:
5304                 ret_val = e1000_copper_link_setup_82577(hw);
5305                 if (ret_val)
5306                         return ret_val;
5307                 break;
5308         case e1000_phy_ife:
5309                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5310                                                &reg_data);
5311                 if (ret_val)
5312                         return ret_val;
5313
5314                 reg_data &= ~IFE_PMC_AUTO_MDIX;
5315
5316                 switch (hw->phy.mdix) {
5317                 case 1:
5318                         reg_data &= ~IFE_PMC_FORCE_MDIX;
5319                         break;
5320                 case 2:
5321                         reg_data |= IFE_PMC_FORCE_MDIX;
5322                         break;
5323                 case 0:
5324                 default:
5325                         reg_data |= IFE_PMC_AUTO_MDIX;
5326                         break;
5327                 }
5328                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5329                                                 reg_data);
5330                 if (ret_val)
5331                         return ret_val;
5332                 break;
5333         default:
5334                 break;
5335         }
5336
5337         return e1000_setup_copper_link_generic(hw);
5338 }
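
/* Illustrative sketch, not part of the driver: the hw->phy.mdix convention
 * used in the IFE case above is 0 = auto crossover, 1 = force MDI,
 * 2 = force MDI-X.  The hypothetical helper below maps that setting onto
 * the IFE MDI control bits in the same way as the switch statement above.
 */
STATIC u16 e1000_example_ife_mdix_bits(u8 mdix, u16 reg_data)
{
        /* start from a clean slate for both control bits */
        reg_data &= ~(IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX);

        if (mdix == 2)                  /* force MDI-X */
                reg_data |= IFE_PMC_FORCE_MDIX;
        else if (mdix != 1)             /* 0 or anything else: auto */
                reg_data |= IFE_PMC_AUTO_MDIX;
        /* mdix == 1: force MDI, both bits stay clear */

        return reg_data;
}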
5339
5340 /**
5341  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5342  *  @hw: pointer to the HW structure
5343  *
5344  *  Calls the PHY specific link setup function and then calls the
5345  *  generic setup_copper_link to finish configuring the link for
5346  *  Lynxpoint PCH devices
5347  **/
5348 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5349 {
5350         u32 ctrl;
5351         s32 ret_val;
5352
5353         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5354
5355         ctrl = E1000_READ_REG(hw, E1000_CTRL);
5356         ctrl |= E1000_CTRL_SLU;
5357         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5358         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5359
5360         ret_val = e1000_copper_link_setup_82577(hw);
5361         if (ret_val)
5362                 return ret_val;
5363
5364         return e1000_setup_copper_link_generic(hw);
5365 }
5366
5367 /**
5368  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5369  *  @hw: pointer to the HW structure
5370  *  @speed: pointer to store current link speed
5371  *  @duplex: pointer to store the current link duplex
5372  *
5373  *  Calls the generic get_speed_and_duplex to retrieve the current link
5374  *  information and then calls the Kumeran lock loss workaround for links at
5375  *  gigabit speeds.
5376  **/
5377 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5378                                           u16 *duplex)
5379 {
5380         s32 ret_val;
5381
5382         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5383
5384         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5385         if (ret_val)
5386                 return ret_val;
5387
5388         if ((hw->mac.type == e1000_ich8lan) &&
5389             (hw->phy.type == e1000_phy_igp_3) &&
5390             (*speed == SPEED_1000)) {
5391                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5392         }
5393
5394         return ret_val;
5395 }
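
/* Illustrative sketch, not part of the driver: the routine above is static
 * and is typically reached through the MAC ops table rather than called
 * directly.  The helper name below is hypothetical.
 */
STATIC void e1000_example_print_speed(struct e1000_hw *hw)
{
        u16 speed, duplex;

        if (!hw->mac.ops.get_link_up_info(hw, &speed, &duplex)) {
                DEBUGOUT1("link speed  = %u Mb/s\n", speed);
                DEBUGOUT1("link duplex = %u\n", duplex);
        }
}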
5396
5397 /**
5398  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5399  *  @hw: pointer to the HW structure
5400  *
5401  *  Work-around for 82566 Kumeran PCS lock loss:
5402  *  On link status change (i.e. PCI reset, speed change), when link is up and
5403  *  speed is gigabit:
5404  *    0) if workaround is optionally disabled do nothing
5405  *    1) wait 1ms for Kumeran link to come up
5406  *    2) check Kumeran Diagnostic register PCS lock loss bit
5407  *    3) if not set the link is locked (all is good), otherwise...
5408  *    4) reset the PHY
5409  *    5) repeat up to 10 times
5410  *  Note: this is only called for IGP3 copper when speed is 1Gb/s.
5411  **/
5412 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5413 {
5414         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5415         u32 phy_ctrl;
5416         s32 ret_val;
5417         u16 i, data;
5418         bool link;
5419
5420         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5421
5422         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5423                 return E1000_SUCCESS;
5424
5425         /* Make sure link is up before proceeding.  If not, just return.
5426          * Attempting this while the link is negotiating has fouled up
5427          * link stability in the past.
5428          */
5429         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5430         if (!link)
5431                 return E1000_SUCCESS;
5432
5433         for (i = 0; i < 10; i++) {
5434                 /* read once to clear */
5435                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5436                 if (ret_val)
5437                         return ret_val;
5438                 /* and again to get new status */
5439                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5440                 if (ret_val)
5441                         return ret_val;
5442
5443                 /* check for PCS lock */
5444                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5445                         return E1000_SUCCESS;
5446
5447                 /* Issue PHY reset */
5448                 hw->phy.ops.reset(hw);
5449                 msec_delay_irq(5);
5450         }
5451         /* Disable GigE link negotiation */
5452         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5453         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5454                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5455         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5456
5457         /* Call gig speed drop workaround on Gig disable before accessing
5458          * any PHY registers
5459          */
5460         e1000_gig_downshift_workaround_ich8lan(hw);
5461
5462         /* unable to acquire PCS lock */
5463         return -E1000_ERR_PHY;
5464 }
5465
5466 /**
5467  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5468  *  @hw: pointer to the HW structure
5469  *  @state: boolean value used to set the current Kumeran workaround state
5470  *
5471  *  If ICH8, set the current Kumeran workaround state (enabled - true
5472  *  /disabled - false).
5473  **/
5474 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5475                                                  bool state)
5476 {
5477         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5478
5479         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5480
5481         if (hw->mac.type != e1000_ich8lan) {
5482                 DEBUGOUT("Workaround applies to ICH8 only.\n");
5483                 return;
5484         }
5485
5486         dev_spec->kmrn_lock_loss_workaround_enabled = state;
5487
5488         return;
5489 }
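
/* Illustrative sketch, not part of the driver: the lock-loss workaround is
 * off until explicitly enabled; once enabled it runs automatically from
 * e1000_get_link_up_info_ich8lan() whenever an ICH8/IGP3 link reports
 * gigabit speed.  The helper name below is hypothetical.
 */
STATIC void e1000_example_enable_kmrn_workaround(struct e1000_hw *hw)
{
        /* No effect on non-ICH8 MACs; the setter above checks mac.type. */
        e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
}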
5490
5491 /**
5492  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5493  *  @hw: pointer to the HW structure
5494  *
5495  *  Workaround for 82566 power-down on D3 entry:
5496  *    1) disable gigabit link
5497  *    2) write VR power-down enable
5498  *    3) read it back
5499  *  Continue if successful, else issue LCD reset and repeat
5500  **/
5501 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5502 {
5503         u32 reg;
5504         u16 data;
5505         u8  retry = 0;
5506
5507         DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5508
5509         if (hw->phy.type != e1000_phy_igp_3)
5510                 return;
5511
5512         /* Try the workaround twice (if needed) */
5513         do {
5514                 /* Disable link */
5515                 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5516                 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5517                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5518                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5519
5520                 /* Call gig speed drop workaround on Gig disable before
5521                  * accessing any PHY registers
5522                  */
5523                 if (hw->mac.type == e1000_ich8lan)
5524                         e1000_gig_downshift_workaround_ich8lan(hw);
5525
5526                 /* Write VR power-down enable */
5527                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5528                 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5529                 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5530                                       data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5531
5532                 /* Read it back and test */
5533                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5534                 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5535                 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5536                         break;
5537
5538                 /* Issue PHY reset and repeat at most one more time */
5539                 reg = E1000_READ_REG(hw, E1000_CTRL);
5540                 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5541                 retry++;
5542         } while (retry);
5543 }
5544
5545 /**
5546  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5547  *  @hw: pointer to the HW structure
5548  *
5549  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
5550  *  LPLU, Gig disable, MDIC PHY reset):
5551  *    1) Set Kumeran Near-end loopback
5552  *    2) Clear Kumeran Near-end loopback
5553  *  Should only be called for ICH8[m] devices with any 1G Phy.
5554  **/
5555 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5556 {
5557         s32 ret_val;
5558         u16 reg_data;
5559
5560         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5561
5562         if ((hw->mac.type != e1000_ich8lan) ||
5563             (hw->phy.type == e1000_phy_ife))
5564                 return;
5565
5566         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5567                                               &reg_data);
5568         if (ret_val)
5569                 return;
5570         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5571         ret_val = e1000_write_kmrn_reg_generic(hw,
5572                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
5573                                                reg_data);
5574         if (ret_val)
5575                 return;
5576         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5577         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5578                                      reg_data);
5579 }
5580
5581 /**
5582  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5583  *  @hw: pointer to the HW structure
5584  *
5585  *  During S0 to Sx transition, it is possible the link remains at gig
5586  *  instead of negotiating to a lower speed.  Before going to Sx, set
5587  *  'Gig Disable' to force link speed negotiation to a lower speed based on
5588  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5589  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5590  *  needs to be written.
5591  *  Parts that support (and are linked to a partner that supports) EEE in
5592  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5593  *  than 10Mbps w/o EEE.
5594  **/
5595 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5596 {
5597         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5598         u32 phy_ctrl;
5599         s32 ret_val;
5600
5601         DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5602
5603         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5604         phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5605
5606         if (hw->phy.type == e1000_phy_i217) {
5607                 u16 phy_reg, device_id = hw->device_id;
5608
5609                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5610                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5611                     (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5612                     (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5613                     (hw->mac.type >= e1000_pch_spt)) {
5614                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5615
5616                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5617                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5618                 }
5619
5620                 ret_val = hw->phy.ops.acquire(hw);
5621                 if (ret_val)
5622                         goto out;
5623
5624                 if (!dev_spec->eee_disable) {
5625                         u16 eee_advert;
5626
5627                         ret_val =
5628                             e1000_read_emi_reg_locked(hw,
5629                                                       I217_EEE_ADVERTISEMENT,
5630                                                       &eee_advert);
5631                         if (ret_val)
5632                                 goto release;
5633
5634                         /* Disable LPLU if both link partners support 100BaseT
5635                          * EEE and 100Full is advertised on both ends of the
5636                          * link, and enable Auto Enable LPI since there will
5637                          * be no driver to enable LPI while in Sx.
5638                          */
5639                         if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5640                             (dev_spec->eee_lp_ability &
5641                              I82579_EEE_100_SUPPORTED) &&
5642                             (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5643                                 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5644                                               E1000_PHY_CTRL_NOND0A_LPLU);
5645
5646                                 /* Set Auto Enable LPI after link up */
5647                                 hw->phy.ops.read_reg_locked(hw,
5648                                                             I217_LPI_GPIO_CTRL,
5649                                                             &phy_reg);
5650                                 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5651                                 hw->phy.ops.write_reg_locked(hw,
5652                                                              I217_LPI_GPIO_CTRL,
5653                                                              phy_reg);
5654                         }
5655                 }
5656
5657                 /* For i217 Intel Rapid Start Technology support,
5658                  * when the system is going into Sx and no manageability engine
5659                  * is present, the driver must configure proxy to reset only on
5660                  * power good.  LPI (Low Power Idle) state must also reset only
5661                  * on power good, as well as the MTA (Multicast table array).
5662                  * The SMBus release must also be disabled on LCD reset.
5663                  */
5664                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5665                       E1000_ICH_FWSM_FW_VALID)) {
5666                         /* Enable proxy to reset only on power good. */
5667                         hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5668                                                     &phy_reg);
5669                         phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5670                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5671                                                      phy_reg);
5672
5673                         /* Set the LPI (EEE) enable bit to reset only on
5674                          * power good.
5675                          */
5676                         hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5677                         phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5678                         hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5679
5680                         /* Disable the SMB release on LCD reset. */
5681                         hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5682                         phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5683                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5684                 }
5685
5686                 /* Enable MTA to reset for Intel Rapid Start Technology
5687                  * Support
5688                  */
5689                 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5690                 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5691                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5692
5693 release:
5694                 hw->phy.ops.release(hw);
5695         }
5696 out:
5697         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5698
5699         if (hw->mac.type == e1000_ich8lan)
5700                 e1000_gig_downshift_workaround_ich8lan(hw);
5701
5702         if (hw->mac.type >= e1000_pchlan) {
5703                 e1000_oem_bits_config_ich8lan(hw, false);
5704
5705                 /* Reset PHY to activate OEM bits on 82577/8 */
5706                 if (hw->mac.type == e1000_pchlan)
5707                         e1000_phy_hw_reset_generic(hw);
5708
5709                 ret_val = hw->phy.ops.acquire(hw);
5710                 if (ret_val)
5711                         return;
5712                 e1000_write_smbus_addr(hw);
5713                 hw->phy.ops.release(hw);
5714         }
5715
5716         return;
5717 }
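
/* Illustrative sketch, not part of the driver: a device-suspend path would
 * typically run the routine above just before the part enters Sx, so that
 * gig is disabled and the OEM/LPLU bits are programmed for low power.  The
 * helper name below is hypothetical.
 */
STATIC void e1000_example_prepare_for_sx(struct e1000_hw *hw)
{
        e1000_suspend_workarounds_ich8lan(hw);
        /* ...platform-specific D3/Sx entry would follow here... */
}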
5718
5719 /**
5720  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5721  *  @hw: pointer to the HW structure
5722  *
5723  *  During Sx to S0 transitions on non-managed devices or managed devices
5724  *  on which PHY resets are not blocked, if the PHY registers cannot be
5725  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5726  *  the PHY.
5727  *  On i217, setup Intel Rapid Start Technology.
5728  **/
5729 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5730 {
5731         s32 ret_val;
5732
5733         DEBUGFUNC("e1000_resume_workarounds_pchlan");
5734         if (hw->mac.type < e1000_pch2lan)
5735                 return E1000_SUCCESS;
5736
5737         ret_val = e1000_init_phy_workarounds_pchlan(hw);
5738         if (ret_val) {
5739                 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5740                 return ret_val;
5741         }
5742
5743         /* For i217 Intel Rapid Start Technology support, when the system
5744          * is transitioning from Sx and no manageability engine is present,
5745          * configure SMBus to restore on reset, disable proxy, and enable
5746          * the reset on MTA (Multicast table array).
5747          */
5748         if (hw->phy.type == e1000_phy_i217) {
5749                 u16 phy_reg;
5750
5751                 ret_val = hw->phy.ops.acquire(hw);
5752                 if (ret_val) {
5753                         DEBUGOUT("Failed to setup iRST\n");
5754                         return ret_val;
5755                 }
5756
5757                 /* Clear Auto Enable LPI after link up */
5758                 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5759                 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5760                 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5761
5762                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5763                     E1000_ICH_FWSM_FW_VALID)) {
5764                         /* Restore clear on SMB if no manageability engine
5765                          * is present
5766                          */
5767                         ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5768                                                               &phy_reg);
5769                         if (ret_val)
5770                                 goto release;
5771                         phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5772                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5773
5774                         /* Disable Proxy */
5775                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5776                 }
5777                 /* Enable reset on MTA */
5778                 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5779                                                       &phy_reg);
5780                 if (ret_val)
5781                         goto release;
5782                 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5783                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5784 release:
5785                 if (ret_val)
5786                         DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5787                 hw->phy.ops.release(hw);
5788                 return ret_val;
5789         }
5790         return E1000_SUCCESS;
5791 }
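
/* Illustrative sketch, not part of the driver: the matching resume path
 * runs the routine above after coming back from Sx and only logs a failure,
 * since link can often still be re-established afterwards.  The helper name
 * below is hypothetical.
 */
STATIC void e1000_example_after_sx_exit(struct e1000_hw *hw)
{
        if (e1000_resume_workarounds_pchlan(hw))
                DEBUGOUT("resume workarounds reported an error\n");
}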
5792
5793 /**
5794  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5795  *  @hw: pointer to the HW structure
5796  *
5797  *  Return the LED back to the default configuration.
5798  **/
5799 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5800 {
5801         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5802
5803         if (hw->phy.type == e1000_phy_ife)
5804                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5805                                              0);
5806
5807         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5808         return E1000_SUCCESS;
5809 }
5810
5811 /**
5812  *  e1000_led_on_ich8lan - Turn LEDs on
5813  *  @hw: pointer to the HW structure
5814  *
5815  *  Turn on the LEDs.
5816  **/
5817 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5818 {
5819         DEBUGFUNC("e1000_led_on_ich8lan");
5820
5821         if (hw->phy.type == e1000_phy_ife)
5822                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5823                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5824
5825         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5826         return E1000_SUCCESS;
5827 }
5828
5829 /**
5830  *  e1000_led_off_ich8lan - Turn LEDs off
5831  *  @hw: pointer to the HW structure
5832  *
5833  *  Turn off the LEDs.
5834  **/
5835 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5836 {
5837         DEBUGFUNC("e1000_led_off_ich8lan");
5838
5839         if (hw->phy.type == e1000_phy_ife)
5840                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5841                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5842
5843         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5844         return E1000_SUCCESS;
5845 }
5846
5847 /**
5848  *  e1000_setup_led_pchlan - Configures SW controllable LED
5849  *  @hw: pointer to the HW structure
5850  *
5851  *  This prepares the SW controllable LED for use.
5852  **/
5853 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5854 {
5855         DEBUGFUNC("e1000_setup_led_pchlan");
5856
5857         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5858                                      (u16)hw->mac.ledctl_mode1);
5859 }
5860
5861 /**
5862  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5863  *  @hw: pointer to the HW structure
5864  *
5865  *  Return the LED back to the default configuration.
5866  **/
5867 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5868 {
5869         DEBUGFUNC("e1000_cleanup_led_pchlan");
5870
5871         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5872                                      (u16)hw->mac.ledctl_default);
5873 }
5874
5875 /**
5876  *  e1000_led_on_pchlan - Turn LEDs on
5877  *  @hw: pointer to the HW structure
5878  *
5879  *  Turn on the LEDs.
5880  **/
5881 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5882 {
5883         u16 data = (u16)hw->mac.ledctl_mode2;
5884         u32 i, led;
5885
5886         DEBUGFUNC("e1000_led_on_pchlan");
5887
5888         /* If no link, then turn LED on by setting the invert bit
5889          * for each LED whose mode is "link_up" in ledctl_mode2.
5890          */
5891         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5892                 for (i = 0; i < 3; i++) {
5893                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5894                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5895                             E1000_LEDCTL_MODE_LINK_UP)
5896                                 continue;
5897                         if (led & E1000_PHY_LED0_IVRT)
5898                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5899                         else
5900                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5901                 }
5902         }
5903
5904         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5905 }
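
/* Illustrative sketch, not part of the driver: the HV_LED_CONFIG word packs
 * three LEDs into 5-bit fields (mode bits plus invert and related control
 * bits), which is why the loops above and below step by i * 5.  The
 * hypothetical helper below tests whether one LED is configured as
 * "link_up" using only the macros already used above.
 */
STATIC bool e1000_example_led_is_link_up(u16 led_cfg, u32 led_idx)
{
        u16 led = (led_cfg >> (led_idx * 5)) & E1000_PHY_LED0_MASK;

        return (led & E1000_PHY_LED0_MODE_MASK) == E1000_LEDCTL_MODE_LINK_UP;
}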
5906
5907 /**
5908  *  e1000_led_off_pchlan - Turn LEDs off
5909  *  @hw: pointer to the HW structure
5910  *
5911  *  Turn off the LEDs.
5912  **/
5913 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5914 {
5915         u16 data = (u16)hw->mac.ledctl_mode1;
5916         u32 i, led;
5917
5918         DEBUGFUNC("e1000_led_off_pchlan");
5919
5920         /* If no link, then turn LED off by clearing the invert bit
5921          * for each LED whose mode is "link_up" in ledctl_mode1.
5922          */
5923         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5924                 for (i = 0; i < 3; i++) {
5925                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5926                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5927                             E1000_LEDCTL_MODE_LINK_UP)
5928                                 continue;
5929                         if (led & E1000_PHY_LED0_IVRT)
5930                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5931                         else
5932                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5933                 }
5934         }
5935
5936         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5937 }
5938
5939 /**
5940  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5941  *  @hw: pointer to the HW structure
5942  *
5943  *  Read appropriate register for the config done bit for completion status
5944  *  and configure the PHY through s/w for EEPROM-less parts.
5945  *
5946  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5947  *  config done bit, so only an error is logged and execution continues.  If
5948  *  we were to return an error, EEPROM-less silicon could not be reset or
5949  *  change link.
5950  **/
5951 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5952 {
5953         s32 ret_val = E1000_SUCCESS;
5954         u32 bank = 0;
5955         u32 status;
5956
5957         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5958
5959         e1000_get_cfg_done_generic(hw);
5960
5961         /* Wait for indication from h/w that it has completed basic config */
5962         if (hw->mac.type >= e1000_ich10lan) {
5963                 e1000_lan_init_done_ich8lan(hw);
5964         } else {
5965                 ret_val = e1000_get_auto_rd_done_generic(hw);
5966                 if (ret_val) {
5967                         /* When auto config read does not complete, do not
5968                          * return with an error. This can happen in situations
5969                          * where there is no eeprom and prevents getting link.
5970                          */
5971                         DEBUGOUT("Auto Read Done did not complete\n");
5972                         ret_val = E1000_SUCCESS;
5973                 }
5974         }
5975
5976         /* Clear PHY Reset Asserted bit */
5977         status = E1000_READ_REG(hw, E1000_STATUS);
5978         if (status & E1000_STATUS_PHYRA)
5979                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5980         else
5981                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5982
5983         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5984         if (hw->mac.type <= e1000_ich9lan) {
5985                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5986                     (hw->phy.type == e1000_phy_igp_3)) {
5987                         e1000_phy_init_script_igp3(hw);
5988                 }
5989         } else {
5990                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5991                         /* Maybe we should do a basic PHY config */
5992                         DEBUGOUT("EEPROM not present\n");
5993                         ret_val = -E1000_ERR_CONFIG;
5994                 }
5995         }
5996
5997         return ret_val;
5998 }
5999
6000 /**
6001  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6002  * @hw: pointer to the HW structure
6003  *
6004  * In the case of a PHY power down to save power, to turn off link during a
6005  * driver unload, or when wake on LAN is not enabled, remove the link.
6006  **/
6007 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6008 {
6009         /* If the management interface is not enabled, then power down */
6010         if (!(hw->mac.ops.check_mng_mode(hw) ||
6011               hw->phy.ops.check_reset_block(hw)))
6012                 e1000_power_down_phy_copper(hw);
6013
6014         return;
6015 }
6016
6017 /**
6018  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6019  *  @hw: pointer to the HW structure
6020  *
6021  *  Clears hardware counters specific to the silicon family and calls
6022  *  clear_hw_cntrs_generic to clear all general purpose counters.
6023  **/
6024 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6025 {
6026         u16 phy_data;
6027         s32 ret_val;
6028
6029         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6030
6031         e1000_clear_hw_cntrs_base_generic(hw);
6032
6033         E1000_READ_REG(hw, E1000_ALGNERRC);
6034         E1000_READ_REG(hw, E1000_RXERRC);
6035         E1000_READ_REG(hw, E1000_TNCRS);
6036         E1000_READ_REG(hw, E1000_CEXTERR);
6037         E1000_READ_REG(hw, E1000_TSCTC);
6038         E1000_READ_REG(hw, E1000_TSCTFC);
6039
6040         E1000_READ_REG(hw, E1000_MGTPRC);
6041         E1000_READ_REG(hw, E1000_MGTPDC);
6042         E1000_READ_REG(hw, E1000_MGTPTC);
6043
6044         E1000_READ_REG(hw, E1000_IAC);
6045         E1000_READ_REG(hw, E1000_ICRXOC);
6046
6047         /* Clear PHY statistics registers */
6048         if ((hw->phy.type == e1000_phy_82578) ||
6049             (hw->phy.type == e1000_phy_82579) ||
6050             (hw->phy.type == e1000_phy_i217) ||
6051             (hw->phy.type == e1000_phy_82577)) {
6052                 ret_val = hw->phy.ops.acquire(hw);
6053                 if (ret_val)
6054                         return;
6055                 ret_val = hw->phy.ops.set_page(hw,
6056                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
6057                 if (ret_val)
6058                         goto release;
6059                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6060                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6061                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6062                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6063                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6064                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6065                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6066                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6067                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6068                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6069                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6070                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6071                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6072                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6073 release:
6074                 hw->phy.ops.release(hw);
6075         }
6076 }
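
/* Illustrative sketch, not part of the driver: reading any of the HV
 * statistics registers requires the same acquire/set_page/read_reg_page/
 * release sequence used above.  How the UPPER/LOWER halves combine into a
 * counter value is PHY-specific and not asserted here; this hypothetical
 * helper simply mirrors the access pattern and returns the LOWER word.
 */
STATIC s32 e1000_example_read_hv_stat(struct e1000_hw *hw, u16 *count)
{
        s32 ret_val;
        u16 phy_data;

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                return ret_val;

        ret_val = hw->phy.ops.set_page(hw, HV_STATS_PAGE << IGP_PAGE_SHIFT);
        if (!ret_val) {
                /* upper half is read first, mirroring the routine above */
                hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
                hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
                *count = phy_data;
        }

        hw->phy.ops.release(hw);
        return ret_val;
}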
6077
6078 /**
6079  *  e1000_configure_k0s_lpt - Configure K0s power state
6080  *  @hw: pointer to the HW structure
6081  *  @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
6082  *      0 corresponds to 128ns; each increment doubles the duration.
6083  *  @min_time: Minimum Tx idle period allowed - valid values are 0 to 4.
6084  *      0 corresponds to 128ns; each increment doubles the duration.
6085  *
6086  *  Configure the K0s power state based on the provided parameters.
6087  *  Assumes semaphore already acquired.
6088  *
6089  *  Success returns 0, Failure returns:
6090  *      -E1000_ERR_PHY (-2) in case of access error
6091  *      -E1000_ERR_PARAM (-4) in case of parameters error
6092  **/
6093 s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
6094 {
6095         s32 ret_val;
6096         u16 kmrn_reg = 0;
6097
6098         DEBUGFUNC("e1000_configure_k0s_lpt");
6099
6100         if (entry_latency > 3 || min_time > 4)
6101                 return -E1000_ERR_PARAM;
6102
6103         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
6104                                              &kmrn_reg);
6105         if (ret_val)
6106                 return ret_val;
6107
6108         /* for now don't touch the latency */
6109         kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
6110         kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));
6111
6112         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
6113                                               kmrn_reg);
6114         if (ret_val)
6115                 return ret_val;
6116
6117         return E1000_SUCCESS;
6118 }
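
/* Illustrative sketch, not part of the driver: e1000_configure_k0s_lpt()
 * assumes the semaphore is already held, so a caller would bracket it with
 * the PHY acquire/release ops.  With min_time = 3 the minimum Tx idle
 * period is 128ns * 2^3 = 1024ns (~1us); entry_latency is currently left
 * untouched by the routine above.  The helper name below is hypothetical.
 */
STATIC s32 e1000_example_set_k0s_min_time(struct e1000_hw *hw)
{
        s32 ret_val;

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                return ret_val;

        ret_val = e1000_configure_k0s_lpt(hw, 0, 3);

        hw->phy.ops.release(hw);
        return ret_val;
}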