7aea8dd5399f03c4f8d2abc2161a165c8580a0b0
[dpdk.git] / drivers / net / e1000 / base / e1000_ich8lan.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
73 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85                                               u8 *mc_addr_list,
86                                               u32 mc_addr_count);
87 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94                                             bool active);
95 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96                                    u16 words, u16 *data);
97 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98                                     u16 words, u16 *data);
99 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
102                                             u16 *data);
103 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
104 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
105 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
106 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
107 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
108 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
110 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
111                                            u16 *speed, u16 *duplex);
112 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
113 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
114 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
115 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
116 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
117 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
118 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
119 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
120 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
121 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
122 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
123 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
124 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
125                                           u32 offset, u8 *data);
126 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
127                                           u8 size, u16 *data);
128 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
129                                           u32 offset, u16 *data);
130 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
131                                                  u32 offset, u8 byte);
132 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
133 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
134 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
135 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1; /* bit 0 Flash Cycle Done */
		u16 flcerr:1; /* bit 1 Flash Cycle Error */
		u16 dael:1; /* bit 2 Direct Access error Log */
		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
		u16 reserved1:2; /* bit 7:6 Reserved */
		u16 reserved2:6; /* bit 13:8 Reserved */
		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval; /* raw 16-bit view of the register */
};
155
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;   /* 0 Flash Cycle Go */
		u16 flcycle:2;   /* 2:1 Flash Cycle */
		u16 reserved:5;   /* 7:3 Reserved  */
		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
		/* NOTE(review): despite the name, bits 15:10 are reserved
		 * per the comment; the field is never used as a lock-down
		 * control here.
		 */
		u16 flockdn:6;   /* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval; /* raw 16-bit view of the register */
};
168
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
		u32 grra:8; /* 0:7 GbE region Read Access */
		u32 grwa:8; /* 8:15 GbE region Write Access */
		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
	} hsf_flregacc;
	/* NOTE(review): regval is u16 but the bitfield struct spans 32 bits,
	 * so only grra/grwa alias regval on little-endian hosts — confirm
	 * this is intentional before widening to u32.
	 */
	u16 regval;
};
179
/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Returns true when the PHY answered with a usable ID, false otherwise.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

	/* Try up to twice to assemble a 32-bit PHY ID from PHY_ID1/PHY_ID2.
	 * 0xFFFF from either register means the PHY did not respond.
	 */
	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	/* A cached ID must match what was just read; with no cached ID,
	 * accept any non-zero ID and record it along with the revision.
	 */
	if (hw->phy.id) {
		if  (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		/* Release the semaphore around the retry; presumably the
		 * slow-mode/get-id helpers acquire it themselves — verify.
		 */
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return false;
out:
	if (hw->mac.type == e1000_pch_lpt) {
		/* Only unforce SMBus if ME is not active */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			/* Unforce SMBus mode in PHY */
			hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);

			/* Unforce SMBus mode in MAC */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
		}
	}

	return true;
}
254
/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");

	/* Set Phy Config Counter to 50msec */
	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit: take SW override of the pin and drive
	 * its value low, then release the override 10us later.
	 */
	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);
	usec_delay(10);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
	E1000_WRITE_FLUSH(hw);

	if (hw->mac.type < e1000_pch_lpt) {
		/* Older parts: fixed settle time. */
		msec_delay(50);
	} else {
		u16 count = 20;

		/* Poll CTRL_EXT.LPCD in 5ms steps (up to ~100ms total),
		 * then allow an extra 30ms for the PHY to settle.
		 */
		do {
			msec_delay(5);
		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
			   E1000_CTRL_EXT_LPCD) && count--);

		msec_delay(30);
	}
}
298
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 *
 *  Returns E1000_SUCCESS, or -E1000_ERR_PHY / an acquire or reset error
 *  code when the PHY could not be made accessible.
 **/
STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, true);

#ifdef ULP_SUPPORT
	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;

#endif /* ULP_SUPPORT */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		DEBUGOUT("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 *
	 * NOTE: the cases below deliberately fall through — each MAC type
	 * first tries cheaper recovery steps, then shares the LANPHYPC
	 * toggle path at the bottom.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		 msec_delay(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		/* On pchlan with valid ME firmware there is nothing to do. */
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {

		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			ERROR_REPORT("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 *  the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			ERROR_REPORT("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		msec_delay(10);
		e1000_gate_hw_phy_config_ich8lan(hw, false);
	}

	return ret_val;
}
432
433 /**
434  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
435  *  @hw: pointer to the HW structure
436  *
437  *  Initialize family-specific PHY parameters and function pointers.
438  **/
439 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
440 {
441         struct e1000_phy_info *phy = &hw->phy;
442         s32 ret_val;
443
444         DEBUGFUNC("e1000_init_phy_params_pchlan");
445
446         phy->addr               = 1;
447         phy->reset_delay_us     = 100;
448
449         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
450         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
451         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
452         phy->ops.set_page       = e1000_set_page_igp;
453         phy->ops.read_reg       = e1000_read_phy_reg_hv;
454         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
455         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
456         phy->ops.release        = e1000_release_swflag_ich8lan;
457         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
458         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
459         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
460         phy->ops.write_reg      = e1000_write_phy_reg_hv;
461         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
462         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
463         phy->ops.power_up       = e1000_power_up_phy_copper;
464         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
465         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
466
467         phy->id = e1000_phy_unknown;
468
469         ret_val = e1000_init_phy_workarounds_pchlan(hw);
470         if (ret_val)
471                 return ret_val;
472
473         if (phy->id == e1000_phy_unknown)
474                 switch (hw->mac.type) {
475                 default:
476                         ret_val = e1000_get_phy_id(hw);
477                         if (ret_val)
478                                 return ret_val;
479                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
480                                 break;
481                         /* fall-through */
482                 case e1000_pch2lan:
483                 case e1000_pch_lpt:
484                         /* In case the PHY needs to be in mdio slow mode,
485                          * set slow mode and try to get the PHY id again.
486                          */
487                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
488                         if (ret_val)
489                                 return ret_val;
490                         ret_val = e1000_get_phy_id(hw);
491                         if (ret_val)
492                                 return ret_val;
493                         break;
494                 }
495         phy->type = e1000_get_phy_type_from_id(phy->id);
496
497         switch (phy->type) {
498         case e1000_phy_82577:
499         case e1000_phy_82579:
500         case e1000_phy_i217:
501                 phy->ops.check_polarity = e1000_check_polarity_82577;
502                 phy->ops.force_speed_duplex =
503                         e1000_phy_force_speed_duplex_82577;
504                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
505                 phy->ops.get_info = e1000_get_phy_info_82577;
506                 phy->ops.commit = e1000_phy_sw_reset_generic;
507                 break;
508         case e1000_phy_82578:
509                 phy->ops.check_polarity = e1000_check_polarity_m88;
510                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
511                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
512                 phy->ops.get_info = e1000_get_phy_info_m88;
513                 break;
514         default:
515                 ret_val = -E1000_ERR_PHY;
516                 break;
517         }
518
519         return ret_val;
520 }
521
522 /**
523  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
524  *  @hw: pointer to the HW structure
525  *
526  *  Initialize family-specific PHY parameters and function pointers.
527  **/
528 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
529 {
530         struct e1000_phy_info *phy = &hw->phy;
531         s32 ret_val;
532         u16 i = 0;
533
534         DEBUGFUNC("e1000_init_phy_params_ich8lan");
535
536         phy->addr               = 1;
537         phy->reset_delay_us     = 100;
538
539         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
540         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
541         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
542         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
543         phy->ops.read_reg       = e1000_read_phy_reg_igp;
544         phy->ops.release        = e1000_release_swflag_ich8lan;
545         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
546         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
547         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
548         phy->ops.write_reg      = e1000_write_phy_reg_igp;
549         phy->ops.power_up       = e1000_power_up_phy_copper;
550         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
551
552         /* We may need to do this twice - once for IGP and if that fails,
553          * we'll set BM func pointers and try again
554          */
555         ret_val = e1000_determine_phy_address(hw);
556         if (ret_val) {
557                 phy->ops.write_reg = e1000_write_phy_reg_bm;
558                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
559                 ret_val = e1000_determine_phy_address(hw);
560                 if (ret_val) {
561                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
562                         return ret_val;
563                 }
564         }
565
566         phy->id = 0;
567         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
568                (i++ < 100)) {
569                 msec_delay(1);
570                 ret_val = e1000_get_phy_id(hw);
571                 if (ret_val)
572                         return ret_val;
573         }
574
575         /* Verify phy id */
576         switch (phy->id) {
577         case IGP03E1000_E_PHY_ID:
578                 phy->type = e1000_phy_igp_3;
579                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
580                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
581                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
582                 phy->ops.get_info = e1000_get_phy_info_igp;
583                 phy->ops.check_polarity = e1000_check_polarity_igp;
584                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
585                 break;
586         case IFE_E_PHY_ID:
587         case IFE_PLUS_E_PHY_ID:
588         case IFE_C_E_PHY_ID:
589                 phy->type = e1000_phy_ife;
590                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
591                 phy->ops.get_info = e1000_get_phy_info_ife;
592                 phy->ops.check_polarity = e1000_check_polarity_ife;
593                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
594                 break;
595         case BME1000_E_PHY_ID:
596                 phy->type = e1000_phy_bm;
597                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
598                 phy->ops.read_reg = e1000_read_phy_reg_bm;
599                 phy->ops.write_reg = e1000_write_phy_reg_bm;
600                 phy->ops.commit = e1000_phy_sw_reset_generic;
601                 phy->ops.get_info = e1000_get_phy_info_m88;
602                 phy->ops.check_polarity = e1000_check_polarity_m88;
603                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
604                 break;
605         default:
606                 return -E1000_ERR_PHY;
607                 break;
608         }
609
610         return E1000_SUCCESS;
611 }
612
613 /**
614  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
615  *  @hw: pointer to the HW structure
616  *
617  *  Initialize family-specific NVM parameters and function
618  *  pointers.
619  **/
620 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
621 {
622         struct e1000_nvm_info *nvm = &hw->nvm;
623         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
624         u32 gfpreg, sector_base_addr, sector_end_addr;
625         u16 i;
626
627         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
628
629         /* Can't read flash registers if the register set isn't mapped. */
630         nvm->type = e1000_nvm_flash_sw;
631         if (!hw->flash_address) {
632                 DEBUGOUT("ERROR: Flash registers not mapped\n");
633                 return -E1000_ERR_CONFIG;
634         }
635
636         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
637
638         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
639          * Add 1 to sector_end_addr since this sector is included in
640          * the overall size.
641          */
642         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
643         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
644
645         /* flash_base_addr is byte-aligned */
646         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
647
648         /* find total size of the NVM, then cut in half since the total
649          * size represents two separate NVM banks.
650          */
651         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
652                                 << FLASH_SECTOR_ADDR_SHIFT);
653         nvm->flash_bank_size /= 2;
654         /* Adjust to word count */
655         nvm->flash_bank_size /= sizeof(u16);
656
657         nvm->word_size = E1000_SHADOW_RAM_WORDS;
658
659         /* Clear shadow ram */
660         for (i = 0; i < nvm->word_size; i++) {
661                 dev_spec->shadow_ram[i].modified = false;
662                 dev_spec->shadow_ram[i].value    = 0xFFFF;
663         }
664
665         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
666         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
667
668         /* Function Pointers */
669         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
670         nvm->ops.release        = e1000_release_nvm_ich8lan;
671         nvm->ops.read           = e1000_read_nvm_ich8lan;
672         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
673         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
674         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
675         nvm->ops.write          = e1000_write_nvm_ich8lan;
676
677         return E1000_SUCCESS;
678 }
679
680 /**
681  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
682  *  @hw: pointer to the HW structure
683  *
684  *  Initialize family-specific MAC parameters and function
685  *  pointers.
686  **/
687 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
688 {
689         struct e1000_mac_info *mac = &hw->mac;
690 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
691         u16 pci_cfg;
692 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
693
694         DEBUGFUNC("e1000_init_mac_params_ich8lan");
695
696         /* Set media type function pointer */
697         hw->phy.media_type = e1000_media_type_copper;
698
699         /* Set mta register count */
700         mac->mta_reg_count = 32;
701         /* Set rar entry count */
702         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
703         if (mac->type == e1000_ich8lan)
704                 mac->rar_entry_count--;
705         /* Set if part includes ASF firmware */
706         mac->asf_firmware_present = true;
707         /* FWSM register */
708         mac->has_fwsm = true;
709         /* ARC subsystem not supported */
710         mac->arc_subsystem_valid = false;
711         /* Adaptive IFS supported */
712         mac->adaptive_ifs = true;
713
714         /* Function pointers */
715
716         /* bus type/speed/width */
717         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
718         /* function id */
719         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
720         /* reset */
721         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
722         /* hw initialization */
723         mac->ops.init_hw = e1000_init_hw_ich8lan;
724         /* link setup */
725         mac->ops.setup_link = e1000_setup_link_ich8lan;
726         /* physical interface setup */
727         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
728         /* check for link */
729         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
730         /* link info */
731         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
732         /* multicast address update */
733         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
734         /* clear hardware counters */
735         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
736
737         /* LED and other operations */
738         switch (mac->type) {
739         case e1000_ich8lan:
740         case e1000_ich9lan:
741         case e1000_ich10lan:
742                 /* check management mode */
743                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
744                 /* ID LED init */
745                 mac->ops.id_led_init = e1000_id_led_init_generic;
746                 /* blink LED */
747                 mac->ops.blink_led = e1000_blink_led_generic;
748                 /* setup LED */
749                 mac->ops.setup_led = e1000_setup_led_generic;
750                 /* cleanup LED */
751                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
752                 /* turn on/off LED */
753                 mac->ops.led_on = e1000_led_on_ich8lan;
754                 mac->ops.led_off = e1000_led_off_ich8lan;
755                 break;
756         case e1000_pch2lan:
757                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
758                 mac->ops.rar_set = e1000_rar_set_pch2lan;
759                 /* fall-through */
760         case e1000_pch_lpt:
761 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
762                 /* multicast address update for pch2 */
763                 mac->ops.update_mc_addr_list =
764                         e1000_update_mc_addr_list_pch2lan;
765                 /* fall-through */
766 #endif
767         case e1000_pchlan:
768 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
769                 /* save PCH revision_id */
770                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
771                 hw->revision_id = (u8)(pci_cfg &= 0x000F);
772 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
773                 /* check management mode */
774                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
775                 /* ID LED init */
776                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
777                 /* setup LED */
778                 mac->ops.setup_led = e1000_setup_led_pchlan;
779                 /* cleanup LED */
780                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
781                 /* turn on/off LED */
782                 mac->ops.led_on = e1000_led_on_pchlan;
783                 mac->ops.led_off = e1000_led_off_pchlan;
784                 break;
785         default:
786                 break;
787         }
788
789         if (mac->type == e1000_pch_lpt) {
790                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
791                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
792                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
793         }
794
795         /* Enable PCS Lock-loss workaround for ICH8 */
796         if (mac->type == e1000_ich8lan)
797                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
798
799         return E1000_SUCCESS;
800 }
801
802 /**
803  *  __e1000_access_emi_reg_locked - Read/write EMI register
804  *  @hw: pointer to the HW structure
805  *  @addr: EMI address to program
806  *  @data: pointer to value to read/write from/to the EMI address
807  *  @read: boolean flag to indicate read or write
808  *
809  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
810  **/
811 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
812                                          u16 *data, bool read)
813 {
814         s32 ret_val;
815
816         DEBUGFUNC("__e1000_access_emi_reg_locked");
817
818         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
819         if (ret_val)
820                 return ret_val;
821
822         if (read)
823                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
824                                                       data);
825         else
826                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
827                                                        *data);
828
829         return ret_val;
830 }
831
832 /**
833  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
834  *  @hw: pointer to the HW structure
835  *  @addr: EMI address to program
836  *  @data: value to be read from the EMI address
837  *
838  *  Assumes the SW/FW/HW Semaphore is already acquired.
839  **/
840 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
841 {
842         DEBUGFUNC("e1000_read_emi_reg_locked");
843
844         return __e1000_access_emi_reg_locked(hw, addr, data, true);
845 }
846
847 /**
848  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
849  *  @hw: pointer to the HW structure
850  *  @addr: EMI address to program
851  *  @data: value to be written to the EMI address
852  *
853  *  Assumes the SW/FW/HW Semaphore is already acquired.
854  **/
855 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
856 {
857         DEBUGFUNC("e1000_read_emi_reg_locked");
858
859         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
860 }
861
862 /**
863  *  e1000_set_eee_pchlan - Enable/disable EEE support
864  *  @hw: pointer to the HW structure
865  *
866  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
867  *  the link and the EEE capabilities of the link partner.  The LPI Control
868  *  register bits will remain set only if/when link is up.
869  *
870  *  EEE LPI must not be asserted earlier than one second after link is up.
871  *  On 82579, EEE LPI should not be enabled until such time otherwise there
872  *  can be link issues with some switches.  Other devices can have EEE LPI
873  *  enabled immediately upon link up since they have a timer in hardware which
874  *  prevents LPI from being asserted too early.
875  **/
876 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
877 {
878         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
879         s32 ret_val;
880         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
881
882         DEBUGFUNC("e1000_set_eee_pchlan");
883
884         switch (hw->phy.type) {
885         case e1000_phy_82579:
886                 lpa = I82579_EEE_LP_ABILITY;
887                 pcs_status = I82579_EEE_PCS_STATUS;
888                 adv_addr = I82579_EEE_ADVERTISEMENT;
889                 break;
890         case e1000_phy_i217:
891                 lpa = I217_EEE_LP_ABILITY;
892                 pcs_status = I217_EEE_PCS_STATUS;
893                 adv_addr = I217_EEE_ADVERTISEMENT;
894                 break;
895         default:
896                 return E1000_SUCCESS;
897         }
898
899         ret_val = hw->phy.ops.acquire(hw);
900         if (ret_val)
901                 return ret_val;
902
903         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
904         if (ret_val)
905                 goto release;
906
907         /* Clear bits that enable EEE in various speeds */
908         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
909
910         /* Enable EEE if not disabled by user */
911         if (!dev_spec->eee_disable) {
912                 /* Save off link partner's EEE ability */
913                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
914                                                     &dev_spec->eee_lp_ability);
915                 if (ret_val)
916                         goto release;
917
918                 /* Read EEE advertisement */
919                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
920                 if (ret_val)
921                         goto release;
922
923                 /* Enable EEE only for speeds in which the link partner is
924                  * EEE capable and for which we advertise EEE.
925                  */
926                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
927                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
928
929                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
930                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
931                         if (data & NWAY_LPAR_100TX_FD_CAPS)
932                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
933                         else
934                                 /* EEE is not supported in 100Half, so ignore
935                                  * partner's EEE in 100 ability if full-duplex
936                                  * is not advertised.
937                                  */
938                                 dev_spec->eee_lp_ability &=
939                                     ~I82579_EEE_100_SUPPORTED;
940                 }
941         }
942
943         if (hw->phy.type == e1000_phy_82579) {
944                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
945                                                     &data);
946                 if (ret_val)
947                         goto release;
948
949                 data &= ~I82579_LPI_100_PLL_SHUT;
950                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
951                                                      data);
952         }
953
954         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
955         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
956         if (ret_val)
957                 goto release;
958
959         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
960 release:
961         hw->phy.ops.release(hw);
962
963         return ret_val;
964 }
965
966 /**
967  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
968  *  @hw:   pointer to the HW structure
969  *  @link: link up bool flag
970  *
971  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
972  *  preventing further DMA write requests.  Workaround the issue by disabling
973  *  the de-assertion of the clock request when in 1Gpbs mode.
974  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
975  *  speeds in order to avoid Tx hangs.
976  **/
977 STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
978 {
979         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
980         u32 status = E1000_READ_REG(hw, E1000_STATUS);
981         s32 ret_val = E1000_SUCCESS;
982         u16 reg;
983
984         if (link && (status & E1000_STATUS_SPEED_1000)) {
985                 ret_val = hw->phy.ops.acquire(hw);
986                 if (ret_val)
987                         return ret_val;
988
989                 ret_val =
990                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
991                                                &reg);
992                 if (ret_val)
993                         goto release;
994
995                 ret_val =
996                     e1000_write_kmrn_reg_locked(hw,
997                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
998                                                 reg &
999                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
1000                 if (ret_val)
1001                         goto release;
1002
1003                 usec_delay(10);
1004
1005                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1006                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1007
1008                 ret_val =
1009                     e1000_write_kmrn_reg_locked(hw,
1010                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1011                                                 reg);
1012 release:
1013                 hw->phy.ops.release(hw);
1014         } else {
1015                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1016                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1017
1018                 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1019                               (status & E1000_STATUS_FD)))
1020                         goto update_fextnvm6;
1021
1022                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1023                 if (ret_val)
1024                         return ret_val;
1025
1026                 /* Clear link status transmit timeout */
1027                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1028
1029                 if (status & E1000_STATUS_SPEED_100) {
1030                         /* Set inband Tx timeout to 5x10us for 100Half */
1031                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1032
1033                         /* Do not extend the K1 entry latency for 100Half */
1034                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1035                 } else {
1036                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1037                         reg |= 50 <<
1038                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1039
1040                         /* Extend the K1 entry latency for 10 Mbps */
1041                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1042                 }
1043
1044                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1045                 if (ret_val)
1046                         return ret_val;
1047
1048 update_fextnvm6:
1049                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1050         }
1051
1052         return ret_val;
1053 }
1054
1055 #ifdef ULP_SUPPORT
1056 /**
1057  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1058  *  @hw: pointer to the HW structure
1059  *  @to_sx: boolean indicating a system power state transition to Sx
1060  *
1061  *  When link is down, configure ULP mode to significantly reduce the power
1062  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1063  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1064  *  system, configure the ULP mode by software.
1065  */
1066 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1067 {
1068         u32 mac_reg;
1069         s32 ret_val = E1000_SUCCESS;
1070         u16 phy_reg;
1071
1072         if ((hw->mac.type < e1000_pch_lpt) ||
1073             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1074             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1075             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1076             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1077             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1078                 return 0;
1079
1080         if (!to_sx) {
1081                 int i = 0;
1082                 /* Poll up to 5 seconds for Cable Disconnected indication */
1083                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1084                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1085                         /* Bail if link is re-acquired */
1086                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1087                                 return -E1000_ERR_PHY;
1088                         if (i++ == 100)
1089                                 break;
1090
1091                         msec_delay(50);
1092                 }
1093                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1094                           (E1000_READ_REG(hw, E1000_FEXT) &
1095                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1096                           i * 50);
1097                 if (!(E1000_READ_REG(hw, E1000_FEXT) &
1098                     E1000_FEXT_PHY_CABLE_DISCONNECTED))
1099                         return 0;
1100         }
1101
1102         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1103                 /* Request ME configure ULP mode in the PHY */
1104                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1105                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1106                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1107
1108                 goto out;
1109         }
1110
1111         ret_val = hw->phy.ops.acquire(hw);
1112         if (ret_val)
1113                 goto out;
1114
1115         /* During S0 Idle keep the phy in PCI-E mode */
1116         if (hw->dev_spec.ich8lan.smbus_disable)
1117                 goto skip_smbus;
1118
1119         /* Force SMBus mode in PHY */
1120         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1121         if (ret_val)
1122                 goto release;
1123         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1124         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1125
1126         /* Force SMBus mode in MAC */
1127         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1128         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1129         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1130
1131 skip_smbus:
1132         if (!to_sx) {
1133                 /* Change the 'Link Status Change' interrupt to trigger
1134                  * on 'Cable Status Change'
1135                  */
1136                 ret_val = e1000_read_kmrn_reg_locked(hw,
1137                                                      E1000_KMRNCTRLSTA_OP_MODES,
1138                                                      &phy_reg);
1139                 if (ret_val)
1140                         goto release;
1141                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1142                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1143                                             phy_reg);
1144         }
1145
1146         /* Set Inband ULP Exit, Reset to SMBus mode and
1147          * Disable SMBus Release on PERST# in PHY
1148          */
1149         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1150         if (ret_val)
1151                 goto release;
1152         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1153                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1154         if (to_sx) {
1155                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1156                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1157                 else
1158                         phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1159
1160                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1161                 phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1162         } else {
1163                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1164                 phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1165                 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1166         }
1167         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1168
1169         /* Set Disable SMBus Release on PERST# in MAC */
1170         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1171         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1172         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1173
1174         /* Commit ULP changes in PHY by starting auto ULP configuration */
1175         phy_reg |= I218_ULP_CONFIG1_START;
1176         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1177
1178         if (!to_sx) {
1179                 /* Disable Tx so that the MAC doesn't send any (buffered)
1180                  * packets to the PHY.
1181                  */
1182                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1183                 mac_reg &= ~E1000_TCTL_EN;
1184                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1185         }
1186
1187 release:
1188         hw->phy.ops.release(hw);
1189 out:
1190         if (ret_val)
1191                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1192         else
1193                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1194
1195         return ret_val;
1196 }
1197
1198 /**
1199  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1200  *  @hw: pointer to the HW structure
1201  *  @force: boolean indicating whether or not to force disabling ULP
1202  *
1203  *  Un-configure ULP mode when link is up, the system is transitioned from
1204  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1205  *  system, poll for an indication from ME that ULP has been un-configured.
1206  *  If not on an ME enabled system, un-configure the ULP mode by software.
1207  *
1208  *  During nominal operation, this function is called when link is acquired
1209  *  to disable ULP mode (force=false); otherwise, for example when unloading
1210  *  the driver or during Sx->S0 transitions, this is called with force=true
1211  *  to forcibly disable ULP.
1212
1213  *  When the cable is plugged in while the device is in D0, a Cable Status
1214  *  Change interrupt is generated which causes this function to be called
1215  *  to partially disable ULP mode and restart autonegotiation.  This function
1216  *  is then called again due to the resulting Link Status Change interrupt
1217  *  to finish cleaning up after the ULP flow.
1218  */
1219 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1220 {
1221         s32 ret_val = E1000_SUCCESS;
1222         u32 mac_reg;
1223         u16 phy_reg;
1224         int i = 0;
1225
1226         if ((hw->mac.type < e1000_pch_lpt) ||
1227             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1228             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1229             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1230             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1231             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1232                 return 0;
1233
1234         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1235                 if (force) {
1236                         /* Request ME un-configure ULP mode in the PHY */
1237                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1238                         mac_reg &= ~E1000_H2ME_ULP;
1239                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1240                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1241                 }
1242
1243                 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1244                 while (E1000_READ_REG(hw, E1000_FWSM) &
1245                        E1000_FWSM_ULP_CFG_DONE) {
1246                         if (i++ == 30) {
1247                                 ret_val = -E1000_ERR_PHY;
1248                                 goto out;
1249                         }
1250
1251                         msec_delay(10);
1252                 }
1253                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1254
1255                 if (force) {
1256                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1257                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1258                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1259                 } else {
1260                         /* Clear H2ME.ULP after ME ULP configuration */
1261                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1262                         mac_reg &= ~E1000_H2ME_ULP;
1263                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1264
1265                         /* Restore link speed advertisements and restart
1266                          * Auto-negotiation
1267                          */
1268                         if (hw->mac.autoneg) {
1269                                 ret_val = e1000_phy_setup_autoneg(hw);
1270                                 if (ret_val)
1271                                         goto out;
1272                         } else {
1273                                 ret_val = e1000_setup_copper_link_generic(hw);
1274                                 if (ret_val)
1275                                         goto out;
1276                         }
1277                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1278                 }
1279
1280                 goto out;
1281         }
1282
1283         ret_val = hw->phy.ops.acquire(hw);
1284         if (ret_val)
1285                 goto out;
1286
1287         /* Revert the change to the 'Link Status Change'
1288          * interrupt to trigger on 'Cable Status Change'
1289          */
1290         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1291                                              &phy_reg);
1292         if (ret_val)
1293                 goto release;
1294         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1295         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1296
1297         if (force)
1298                 /* Toggle LANPHYPC Value bit */
1299                 e1000_toggle_lanphypc_pch_lpt(hw);
1300
1301         /* Unforce SMBus mode in PHY */
1302         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1303         if (ret_val) {
1304                 /* The MAC might be in PCIe mode, so temporarily force to
1305                  * SMBus mode in order to access the PHY.
1306                  */
1307                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1308                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1309                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1310
1311                 msec_delay(50);
1312
1313                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1314                                                        &phy_reg);
1315                 if (ret_val)
1316                         goto release;
1317         }
1318         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1319         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1320
1321         /* Unforce SMBus mode in MAC */
1322         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1323         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1324         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1325
1326         /* When ULP mode was previously entered, K1 was disabled by the
1327          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1328          */
1329         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1330         if (ret_val)
1331                 goto release;
1332         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1333         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1334
1335         /* Clear ULP enabled configuration */
1336         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1337         if (ret_val)
1338                 goto release;
1339         /* CSC interrupt received due to ULP Indication */
1340         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1341                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1342                              I218_ULP_CONFIG1_STICKY_ULP |
1343                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1344                              I218_ULP_CONFIG1_WOL_HOST |
1345                              I218_ULP_CONFIG1_INBAND_EXIT |
1346                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1347                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1348
1349                 /* Commit ULP changes by starting auto ULP configuration */
1350                 phy_reg |= I218_ULP_CONFIG1_START;
1351                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1352
1353                 /* Clear Disable SMBus Release on PERST# in MAC */
1354                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1355                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1356                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1357
1358                 if (!force) {
1359                         hw->phy.ops.release(hw);
1360
1361                         if (hw->mac.autoneg)
1362                                 e1000_phy_setup_autoneg(hw);
1363
1364                         e1000_sw_lcd_config_ich8lan(hw);
1365
1366                         e1000_oem_bits_config_ich8lan(hw, true);
1367
1368                         /* Set ULP state to unknown and return non-zero to
1369                          * indicate no link (yet) and re-enter on the next LSC
1370                          * to finish disabling ULP flow.
1371                          */
1372                         hw->dev_spec.ich8lan.ulp_state =
1373                             e1000_ulp_state_unknown;
1374
1375                         return 1;
1376                 }
1377         }
1378
1379         /* Re-enable Tx */
1380         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1381         mac_reg |= E1000_TCTL_EN;
1382         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1383
1384 release:
1385         hw->phy.ops.release(hw);
1386         if (force) {
1387                 hw->phy.ops.reset(hw);
1388                 msec_delay(50);
1389         }
1390 out:
1391         if (ret_val)
1392                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1393         else
1394                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1395
1396         return ret_val;
1397 }
1398
1399 #endif /* ULP_SUPPORT */
/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see of the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 *
 *  Returns E1000_SUCCESS when there is nothing to do (no link status
 *  change pending, or link is down), -E1000_ERR_CONFIG when speed/duplex
 *  are forced, otherwise the status of the post-link-up configuration.
 **/
STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val, tipg_reg = 0;
	u16 emi_addr, emi_val = 0;
	bool link = false;
	u16 phy_reg;

	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return E1000_SUCCESS;

	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
		/* First we want to see if the MII Status Register reports
		 * link.  If so, then we want to get the current speed/duplex
		 * of the PHY.
		 */
		ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
		if (ret_val)
			return ret_val;
	} else {
		/* Check the MAC's STATUS register to determine link state
		 * since the PHY could be inaccessible while in ULP mode.
		 * Exit ULP on link-up, (re-)enter it on link-down.
		 */
		link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
		if (link)
			ret_val = e1000_disable_ulp_lpt_lp(hw, false);
		else
			ret_val = e1000_enable_ulp_lpt_lp(hw, false);
		if (ret_val)
			return ret_val;
	}

	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* When connected at 10Mbps half-duplex, some parts are excessively
	 * aggressive resulting in many collisions. To avoid this, increase
	 * the IPG and reduce Rx latency in the PHY.
	 */
	if (((hw->mac.type == e1000_pch2lan) ||
	     (hw->mac.type == e1000_pch_lpt)) && link) {
		u16 speed, duplex;

		e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
		tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
		tipg_reg &= ~E1000_TIPG_IPGT_MASK;

		if (duplex == HALF_DUPLEX && speed == SPEED_10) {
			tipg_reg |= 0xFF;
			/* Reduce Rx latency in analog PHY */
			emi_val = 0;
		} else {
			/* Roll back the default values */
			tipg_reg |= 0x08;
			emi_val = 1;
		}

		E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		/* The Rx latency EMI register lives at a different address
		 * on 82579 (pch2lan) vs I217 parts.
		 */
		if (hw->mac.type == e1000_pch2lan)
			emi_addr = I82579_RX_CONFIG;
		else
			emi_addr = I217_RX_CONFIG;
		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);

		if (hw->mac.type >= e1000_pch_lpt) {
			u16 phy_reg;

			/* Program the PHY PLL clock gate register differently
			 * for 10/100 vs 1000 Mbps.  Errors from these two
			 * locked accesses are not propagated.
			 */
			hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
						    &phy_reg);
			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
			if (speed == SPEED_100 || speed == SPEED_10)
				phy_reg |= 0x3E8;
			else
				phy_reg |= 0xFA;
			hw->phy.ops.write_reg_locked(hw,
						     I217_PLL_CLOCK_GATE_REG,
						     phy_reg);
		}
		hw->phy.ops.release(hw);

		/* Propagate a failure from the EMI write above, after the
		 * semaphore has been released.
		 */
		if (ret_val)
			return ret_val;
	}

	/* I217 Packet Loss issue:
	 * ensure that FEXTNVM4 Beacon Duration is set correctly
	 * on power up.
	 * Set the Beacon Duration for I217 to 8 usec
	 */
	if (hw->mac.type == e1000_pch_lpt) {
		u32 mac_reg;

		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
	}

	/* Work-around I218 hang issue */
	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
		if (ret_val)
			return ret_val;
	}
	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

	/* Configure K0s minimum time */
	if (hw->mac.type == e1000_pch_lpt) {
		e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
	}

	if (!link)
		return E1000_SUCCESS; /* No link detected */

	mac->get_link_status = false;

	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			return ret_val;
		/* fall-thru */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				return ret_val;
		}

		/* Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		/* STATUS.FD clear means half-duplex */
		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
		    E1000_STATUS_FD)
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000_check_downshift_generic(hw);

	/* Enable/Disable EEE after link up */
	if (hw->phy.type > e1000_phy_82579) {
		ret_val = e1000_set_eee_pchlan(hw);
		if (ret_val)
			return ret_val;
	}

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000_config_fc_after_link_up_generic(hw);
	if (ret_val)
		DEBUGOUT("Error configuring flow control\n");

	return ret_val;
}
1612
1613 /**
1614  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1615  *  @hw: pointer to the HW structure
1616  *
1617  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1618  **/
1619 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1620 {
1621         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1622
1623         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1624         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1625         switch (hw->mac.type) {
1626         case e1000_ich8lan:
1627         case e1000_ich9lan:
1628         case e1000_ich10lan:
1629                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1630                 break;
1631         case e1000_pchlan:
1632         case e1000_pch2lan:
1633         case e1000_pch_lpt:
1634                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1635                 break;
1636         default:
1637                 break;
1638         }
1639 }
1640
1641 /**
1642  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1643  *  @hw: pointer to the HW structure
1644  *
1645  *  Acquires the mutex for performing NVM operations.
1646  **/
1647 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1648 {
1649         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1650
1651         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1652
1653         return E1000_SUCCESS;
1654 }
1655
1656 /**
1657  *  e1000_release_nvm_ich8lan - Release NVM mutex
1658  *  @hw: pointer to the HW structure
1659  *
1660  *  Releases the mutex used while performing NVM operations.
1661  **/
1662 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1663 {
1664         DEBUGFUNC("e1000_release_nvm_ich8lan");
1665
1666         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1667
1668         return;
1669 }
1670
1671 /**
1672  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1673  *  @hw: pointer to the HW structure
1674  *
1675  *  Acquires the software control flag for performing PHY and select
1676  *  MAC CSR accesses.
1677  **/
1678 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1679 {
1680         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1681         s32 ret_val = E1000_SUCCESS;
1682
1683         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1684
1685         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1686
1687         while (timeout) {
1688                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1689                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1690                         break;
1691
1692                 msec_delay_irq(1);
1693                 timeout--;
1694         }
1695
1696         if (!timeout) {
1697                 DEBUGOUT("SW has already locked the resource.\n");
1698                 ret_val = -E1000_ERR_CONFIG;
1699                 goto out;
1700         }
1701
1702         timeout = SW_FLAG_TIMEOUT;
1703
1704         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1705         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1706
1707         while (timeout) {
1708                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1709                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1710                         break;
1711
1712                 msec_delay_irq(1);
1713                 timeout--;
1714         }
1715
1716         if (!timeout) {
1717                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1718                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1719                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1720                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1721                 ret_val = -E1000_ERR_CONFIG;
1722                 goto out;
1723         }
1724
1725 out:
1726         if (ret_val)
1727                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1728
1729         return ret_val;
1730 }
1731
1732 /**
1733  *  e1000_release_swflag_ich8lan - Release software control flag
1734  *  @hw: pointer to the HW structure
1735  *
1736  *  Releases the software control flag for performing PHY and select
1737  *  MAC CSR accesses.
1738  **/
1739 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1740 {
1741         u32 extcnf_ctrl;
1742
1743         DEBUGFUNC("e1000_release_swflag_ich8lan");
1744
1745         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1746
1747         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1748                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1749                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1750         } else {
1751                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1752         }
1753
1754         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1755
1756         return;
1757 }
1758
1759 /**
1760  *  e1000_check_mng_mode_ich8lan - Checks management mode
1761  *  @hw: pointer to the HW structure
1762  *
1763  *  This checks if the adapter has any manageability enabled.
1764  *  This is a function pointer entry point only called by read/write
1765  *  routines for the PHY and NVM parts.
1766  **/
1767 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1768 {
1769         u32 fwsm;
1770
1771         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1772
1773         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1774
1775         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1776                ((fwsm & E1000_FWSM_MODE_MASK) ==
1777                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1778 }
1779
1780 /**
1781  *  e1000_check_mng_mode_pchlan - Checks management mode
1782  *  @hw: pointer to the HW structure
1783  *
1784  *  This checks if the adapter has iAMT enabled.
1785  *  This is a function pointer entry point only called by read/write
1786  *  routines for the PHY and NVM parts.
1787  **/
1788 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1789 {
1790         u32 fwsm;
1791
1792         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1793
1794         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1795
1796         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1797                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1798 }
1799
/**
 *  e1000_rar_set_pch2lan - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6 bytes, network byte order)
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.  For 82579, RAR[0] is the base address register that is to
 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 *  Use SHRA[0-3] in place of those reserved for ME.
 *
 *  Returns E1000_SUCCESS on success, or -E1000_ERR_CONFIG when the entry
 *  could not be written (index out of range, semaphore not acquired, or
 *  the SHRA write did not stick).
 **/
STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	DEBUGFUNC("e1000_rar_set_pch2lan");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* RAR[0] is written directly; no semaphore is needed */
	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return E1000_SUCCESS;
	}

	/* RAR[1-6] are owned by manageability.  Skip those and program the
	 * next address into the SHRA register array.
	 */
	if (index < (u32) (hw->mac.rar_entry_count)) {
		s32 ret_val;

		/* SHRA writes are protected by the SW/FW/HW semaphore */
		ret_val = e1000_acquire_swflag_ich8lan(hw);
		if (ret_val)
			goto out;

		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
		E1000_WRITE_FLUSH(hw);

		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates - writes to an SHRA entry
		 * that ME has locked may not take effect
		 */
		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
			return E1000_SUCCESS;

		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}
1868
/**
 *  e1000_rar_set_pch_lpt - Set receive address registers
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6 bytes, network byte order)
 *  @index: receive address array register
 *
 *  Sets the receive address register array at index to the address passed
 *  in by addr. For LPT, RAR[0] is the base address register that is to
 *  contain the MAC address. SHRA[0-10] are the shared receive address
 *  registers that are shared between the Host and manageability engine (ME).
 *
 *  Returns E1000_SUCCESS on success, or -E1000_ERR_CONFIG when the entry
 *  is out of range, locked by ME, or the write did not take effect.
 **/
STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;
	u32 wlock_mac;

	DEBUGFUNC("e1000_rar_set_pch_lpt");

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* RAR[0] is written directly; no semaphore is needed */
	if (index == 0) {
		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
		E1000_WRITE_FLUSH(hw);
		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
		E1000_WRITE_FLUSH(hw);
		return E1000_SUCCESS;
	}

	/* The manageability engine (ME) can lock certain SHRAR registers that
	 * it is using - those registers are unavailable for use.
	 */
	if (index < hw->mac.rar_entry_count) {
		/* FWSM.WLOCK_MAC: 1 means all SHRAR entries are locked;
		 * a larger non-zero value appears to bound the writable
		 * index range (index <= wlock_mac) - confirm against spec.
		 */
		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
			    E1000_FWSM_WLOCK_MAC_MASK;
		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;

		/* Check if all SHRAR registers are locked */
		if (wlock_mac == 1)
			goto out;

		if ((wlock_mac == 0) || (index <= wlock_mac)) {
			s32 ret_val;

			/* SHRAR writes require the SW/FW/HW semaphore */
			ret_val = e1000_acquire_swflag_ich8lan(hw);

			if (ret_val)
				goto out;

			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
					rar_low);
			E1000_WRITE_FLUSH(hw);
			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
					rar_high);
			E1000_WRITE_FLUSH(hw);

			e1000_release_swflag_ich8lan(hw);

			/* verify the register updates */
			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
				return E1000_SUCCESS;
		}
	}

out:
	DEBUGOUT1("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}
1947
1948 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1949 /**
1950  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1951  *  @hw: pointer to the HW structure
1952  *  @mc_addr_list: array of multicast addresses to program
1953  *  @mc_addr_count: number of multicast addresses to program
1954  *
1955  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1956  *  The caller must have a packed mc_addr_list of multicast addresses.
1957  **/
1958 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1959                                               u8 *mc_addr_list,
1960                                               u32 mc_addr_count)
1961 {
1962         u16 phy_reg = 0;
1963         int i;
1964         s32 ret_val;
1965
1966         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1967
1968         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1969
1970         ret_val = hw->phy.ops.acquire(hw);
1971         if (ret_val)
1972                 return;
1973
1974         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1975         if (ret_val)
1976                 goto release;
1977
1978         for (i = 0; i < hw->mac.mta_reg_count; i++) {
1979                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1980                                            (u16)(hw->mac.mta_shadow[i] &
1981                                                  0xFFFF));
1982                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1983                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
1984                                                  0xFFFF));
1985         }
1986
1987         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1988
1989 release:
1990         hw->phy.ops.release(hw);
1991 }
1992
1993 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
1994 /**
1995  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1996  *  @hw: pointer to the HW structure
1997  *
1998  *  Checks if firmware is blocking the reset of the PHY.
1999  *  This is a function pointer entry point only called by
2000  *  reset routines.
2001  **/
2002 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2003 {
2004         u32 fwsm;
2005         bool blocked = false;
2006         int i = 0;
2007
2008         DEBUGFUNC("e1000_check_reset_block_ich8lan");
2009
2010         do {
2011                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2012                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2013                         blocked = true;
2014                         msec_delay(10);
2015                         continue;
2016                 }
2017                 blocked = false;
2018         } while (blocked && (i++ < 30));
2019         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2020 }
2021
2022 /**
2023  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2024  *  @hw: pointer to the HW structure
2025  *
2026  *  Assumes semaphore already acquired.
2027  *
2028  **/
2029 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2030 {
2031         u16 phy_data;
2032         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2033         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2034                 E1000_STRAP_SMT_FREQ_SHIFT;
2035         s32 ret_val;
2036
2037         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2038
2039         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2040         if (ret_val)
2041                 return ret_val;
2042
2043         phy_data &= ~HV_SMB_ADDR_MASK;
2044         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2045         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2046
2047         if (hw->phy.type == e1000_phy_i217) {
2048                 /* Restore SMBus frequency */
2049                 if (freq--) {
2050                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2051                         phy_data |= (freq & (1 << 0)) <<
2052                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2053                         phy_data |= (freq & (1 << 1)) <<
2054                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2055                 } else {
2056                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
2057                 }
2058         }
2059
2060         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2061 }
2062
/**
 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 *  @hw:   pointer to the HW structure
 *
 *  SW should configure the LCD from the NVM extended configuration region
 *  as a workaround for certain parts.
 *
 *  Returns E1000_SUCCESS when no configuration is required or it completed,
 *  otherwise the first error from a PHY/NVM access.
 **/
STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val = E1000_SUCCESS;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");

	/* Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	switch (hw->mac.type) {
	case e1000_ich8lan:
		/* Only the IGP3 PHY on specific ICH8 devices needs this */
		if (phy->type != e1000_phy_igp_3)
			return ret_val;

		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
			break;
		}
		/* Fall-thru */
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		break;
	default:
		return ret_val;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Nothing to do unless the NVM's SW-config bit is set */
	data = E1000_READ_REG(hw, E1000_FEXTNVM);
	if (!(data & sw_cfg_mask))
		goto release;

	/* Make sure HW does not configure LCD from PHY
	 * extended configuration before SW configuration
	 */
	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
	if ((hw->mac.type < e1000_pch2lan) &&
	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
		goto release;

	/* Extract the length of the extended configuration region */
	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
	if (!cnf_size)
		goto release;

	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

	if (((hw->mac.type == e1000_pchlan) &&
	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
	    (hw->mac.type > e1000_pchlan)) {
		/* HW configures the SMBus address and LEDs when the
		 * OEM and LCD Write Enable bits are set in the NVM.
		 * When both NVM bits are cleared, SW will configure
		 * them instead.
		 */
		ret_val = e1000_write_smbus_addr(hw);
		if (ret_val)
			goto release;

		/* Mirror the MAC LED configuration into the PHY */
		data = E1000_READ_REG(hw, E1000_LEDCTL);
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
							(u16)data);
		if (ret_val)
			goto release;
	}

	/* Configure LCD from extended configuration region. */

	/* cnf_base_addr is in DWORD */
	word_addr = (u16)(cnf_base_addr << 1);

	/* Each region entry is a (data word, address word) pair */
	for (i = 0; i < cnf_size; i++) {
		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
					   &reg_data);
		if (ret_val)
			goto release;

		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
					   1, &reg_addr);
		if (ret_val)
			goto release;

		/* Save off the PHY page for future writes. */
		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;
			continue;
		}

		/* Combine the saved page with the in-page register offset */
		reg_addr &= PHY_REG_MASK;
		reg_addr |= phy_page;

		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
						    reg_data);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
	return ret_val;
}
2184
/**
 *  e1000_k1_gig_workaround_hv - K1 Si workaround
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
 *  If link is down, the function will restore the default K1 setting located
 *  in the NVM.
 **/
STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = E1000_SUCCESS;
	u16 status_reg = 0;
	/* Default K1 state comes from the value saved out of the NVM */
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	DEBUGFUNC("e1000_k1_gig_workaround_hv");

	/* Workaround applies only to the pchlan MAC type */
	if (hw->mac.type != e1000_pchlan)
		return E1000_SUCCESS;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			/* Disable K1 only for a resolved 1000 Mbps link */
			status_reg &= (BM_CS_STATUS_LINK_UP |
				       BM_CS_STATUS_RESOLVED |
				       BM_CS_STATUS_SPEED_MASK);

			if (status_reg == (BM_CS_STATUS_LINK_UP |
					   BM_CS_STATUS_RESOLVED |
					   BM_CS_STATUS_SPEED_1000))
				k1_enable = false;
		}

		if (hw->phy.type == e1000_phy_82577) {
			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
							      &status_reg);
			if (ret_val)
				goto release;

			/* Same check using the 82577's HV status register */
			status_reg &= (HV_M_STATUS_LINK_UP |
				       HV_M_STATUS_AUTONEG_COMPLETE |
				       HV_M_STATUS_SPEED_MASK);

			if (status_reg == (HV_M_STATUS_LINK_UP |
					   HV_M_STATUS_AUTONEG_COMPLETE |
					   HV_M_STATUS_SPEED_1000))
				k1_enable = false;
		}

		/* Link stall fix for link up */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
						       0x4100);
		if (ret_val)
			goto release;
	}

	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2266
2267 /**
2268  *  e1000_configure_k1_ich8lan - Configure K1 power state
2269  *  @hw: pointer to the HW structure
2270  *  @enable: K1 state to configure
2271  *
2272  *  Configure the K1 power state based on the provided parameter.
2273  *  Assumes semaphore already acquired.
2274  *
2275  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2276  **/
2277 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2278 {
2279         s32 ret_val;
2280         u32 ctrl_reg = 0;
2281         u32 ctrl_ext = 0;
2282         u32 reg = 0;
2283         u16 kmrn_reg = 0;
2284
2285         DEBUGFUNC("e1000_configure_k1_ich8lan");
2286
2287         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2288                                              &kmrn_reg);
2289         if (ret_val)
2290                 return ret_val;
2291
2292         if (k1_enable)
2293                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2294         else
2295                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2296
2297         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2298                                               kmrn_reg);
2299         if (ret_val)
2300                 return ret_val;
2301
2302         usec_delay(20);
2303         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2304         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2305
2306         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2307         reg |= E1000_CTRL_FRCSPD;
2308         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2309
2310         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2311         E1000_WRITE_FLUSH(hw);
2312         usec_delay(20);
2313         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2314         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2315         E1000_WRITE_FLUSH(hw);
2316         usec_delay(20);
2317
2318         return E1000_SUCCESS;
2319 }
2320
2321 /**
2322  *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2323  *  @hw:       pointer to the HW structure
2324  *  @d0_state: boolean if entering d0 or d3 device state
2325  *
2326  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2327  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2328  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
2329  **/
2330 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2331 {
2332         s32 ret_val = 0;
2333         u32 mac_reg;
2334         u16 oem_reg;
2335
2336         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2337
2338         if (hw->mac.type < e1000_pchlan)
2339                 return ret_val;
2340
2341         ret_val = hw->phy.ops.acquire(hw);
2342         if (ret_val)
2343                 return ret_val;
2344
2345         if (hw->mac.type == e1000_pchlan) {
2346                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2347                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2348                         goto release;
2349         }
2350
2351         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2352         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2353                 goto release;
2354
2355         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2356
2357         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2358         if (ret_val)
2359                 goto release;
2360
2361         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2362
2363         if (d0_state) {
2364                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2365                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2366
2367                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2368                         oem_reg |= HV_OEM_BITS_LPLU;
2369         } else {
2370                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2371                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2372                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2373
2374                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2375                     E1000_PHY_CTRL_NOND0A_LPLU))
2376                         oem_reg |= HV_OEM_BITS_LPLU;
2377         }
2378
2379         /* Set Restart auto-neg to activate the bits */
2380         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2381             !hw->phy.ops.check_reset_block(hw))
2382                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2383
2384         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2385
2386 release:
2387         hw->phy.ops.release(hw);
2388
2389         return ret_val;
2390 }
2391
2392
2393 /**
2394  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2395  *  @hw:   pointer to the HW structure
2396  **/
2397 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2398 {
2399         s32 ret_val;
2400         u16 data;
2401
2402         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2403
2404         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2405         if (ret_val)
2406                 return ret_val;
2407
2408         data |= HV_KMRN_MDIO_SLOW;
2409
2410         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2411
2412         return ret_val;
2413 }
2414
2415 /**
2416  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2417  *  done after every PHY reset.
2418  **/
2419 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2420 {
2421         s32 ret_val = E1000_SUCCESS;
2422         u16 phy_data;
2423
2424         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2425
2426         if (hw->mac.type != e1000_pchlan)
2427                 return E1000_SUCCESS;
2428
2429         /* Set MDIO slow mode before any other MDIO access */
2430         if (hw->phy.type == e1000_phy_82577) {
2431                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2432                 if (ret_val)
2433                         return ret_val;
2434         }
2435
2436         if (((hw->phy.type == e1000_phy_82577) &&
2437              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2438             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2439                 /* Disable generation of early preamble */
2440                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2441                 if (ret_val)
2442                         return ret_val;
2443
2444                 /* Preamble tuning for SSC */
2445                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2446                                                 0xA204);
2447                 if (ret_val)
2448                         return ret_val;
2449         }
2450
2451         if (hw->phy.type == e1000_phy_82578) {
2452                 /* Return registers to default by doing a soft reset then
2453                  * writing 0x3140 to the control register.
2454                  */
2455                 if (hw->phy.revision < 2) {
2456                         e1000_phy_sw_reset_generic(hw);
2457                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2458                                                         0x3140);
2459                 }
2460         }
2461
2462         /* Select page 0 */
2463         ret_val = hw->phy.ops.acquire(hw);
2464         if (ret_val)
2465                 return ret_val;
2466
2467         hw->phy.addr = 1;
2468         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2469         hw->phy.ops.release(hw);
2470         if (ret_val)
2471                 return ret_val;
2472
2473         /* Configure the K1 Si workaround during phy reset assuming there is
2474          * link so that it disables K1 if link is in 1Gbps.
2475          */
2476         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2477         if (ret_val)
2478                 return ret_val;
2479
2480         /* Workaround for link disconnects on a busy hub in half duplex */
2481         ret_val = hw->phy.ops.acquire(hw);
2482         if (ret_val)
2483                 return ret_val;
2484         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2485         if (ret_val)
2486                 goto release;
2487         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2488                                                phy_data & 0x00FF);
2489         if (ret_val)
2490                 goto release;
2491
2492         /* set MSE higher to enable link to stay up when noise is high */
2493         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2494 release:
2495         hw->phy.ops.release(hw);
2496
2497         return ret_val;
2498 }
2499
2500 /**
2501  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2502  *  @hw:   pointer to the HW structure
2503  **/
2504 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2505 {
2506         u32 mac_reg;
2507         u16 i, phy_reg = 0;
2508         s32 ret_val;
2509
2510         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2511
2512         ret_val = hw->phy.ops.acquire(hw);
2513         if (ret_val)
2514                 return;
2515         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2516         if (ret_val)
2517                 goto release;
2518
2519         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2520         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2521                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2522                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2523                                            (u16)(mac_reg & 0xFFFF));
2524                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2525                                            (u16)((mac_reg >> 16) & 0xFFFF));
2526
2527                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2528                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2529                                            (u16)(mac_reg & 0xFFFF));
2530                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2531                                            (u16)((mac_reg & E1000_RAH_AV)
2532                                                  >> 16));
2533         }
2534
2535         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2536
2537 release:
2538         hw->phy.ops.release(hw);
2539 }
2540
2541 #ifndef CRC32_OS_SUPPORT
2542 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2543 {
2544         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2545         u32 i, j, mask, crc;
2546
2547         DEBUGFUNC("e1000_calc_rx_da_crc");
2548
2549         crc = 0xffffffff;
2550         for (i = 0; i < 6; i++) {
2551                 crc = crc ^ mac[i];
2552                 for (j = 8; j > 0; j--) {
2553                         mask = (crc & 1) * (-1);
2554                         crc = (crc >> 1) ^ (poly & mask);
2555                 }
2556         }
2557         return ~crc;
2558 }
2559
2560 #endif /* CRC32_OS_SUPPORT */
2561 /**
2562  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2563  *  with 82579 PHY
2564  *  @hw: pointer to the HW structure
2565  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2566  **/
2567 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2568 {
2569         s32 ret_val = E1000_SUCCESS;
2570         u16 phy_reg, data;
2571         u32 mac_reg;
2572         u16 i;
2573
2574         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2575
2576         if (hw->mac.type < e1000_pch2lan)
2577                 return E1000_SUCCESS;
2578
2579         /* disable Rx path while enabling/disabling workaround */
2580         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2581         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2582                                         phy_reg | (1 << 14));
2583         if (ret_val)
2584                 return ret_val;
2585
2586         if (enable) {
2587                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2588                  * SHRAL/H) and initial CRC values to the MAC
2589                  */
2590                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2591                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2592                         u32 addr_high, addr_low;
2593
2594                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2595                         if (!(addr_high & E1000_RAH_AV))
2596                                 continue;
2597                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2598                         mac_addr[0] = (addr_low & 0xFF);
2599                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2600                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2601                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2602                         mac_addr[4] = (addr_high & 0xFF);
2603                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2604
2605 #ifndef CRC32_OS_SUPPORT
2606                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2607                                         e1000_calc_rx_da_crc(mac_addr));
2608 #else /* CRC32_OS_SUPPORT */
2609                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2610                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2611 #endif /* CRC32_OS_SUPPORT */
2612                 }
2613
2614                 /* Write Rx addresses to the PHY */
2615                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2616
2617                 /* Enable jumbo frame workaround in the MAC */
2618                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2619                 mac_reg &= ~(1 << 14);
2620                 mac_reg |= (7 << 15);
2621                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2622
2623                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2624                 mac_reg |= E1000_RCTL_SECRC;
2625                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2626
2627                 ret_val = e1000_read_kmrn_reg_generic(hw,
2628                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2629                                                 &data);
2630                 if (ret_val)
2631                         return ret_val;
2632                 ret_val = e1000_write_kmrn_reg_generic(hw,
2633                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2634                                                 data | (1 << 0));
2635                 if (ret_val)
2636                         return ret_val;
2637                 ret_val = e1000_read_kmrn_reg_generic(hw,
2638                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2639                                                 &data);
2640                 if (ret_val)
2641                         return ret_val;
2642                 data &= ~(0xF << 8);
2643                 data |= (0xB << 8);
2644                 ret_val = e1000_write_kmrn_reg_generic(hw,
2645                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2646                                                 data);
2647                 if (ret_val)
2648                         return ret_val;
2649
2650                 /* Enable jumbo frame workaround in the PHY */
2651                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2652                 data &= ~(0x7F << 5);
2653                 data |= (0x37 << 5);
2654                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2655                 if (ret_val)
2656                         return ret_val;
2657                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2658                 data &= ~(1 << 13);
2659                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2660                 if (ret_val)
2661                         return ret_val;
2662                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2663                 data &= ~(0x3FF << 2);
2664                 data |= (E1000_TX_PTR_GAP << 2);
2665                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2666                 if (ret_val)
2667                         return ret_val;
2668                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2669                 if (ret_val)
2670                         return ret_val;
2671                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2672                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2673                                                 (1 << 10));
2674                 if (ret_val)
2675                         return ret_val;
2676         } else {
2677                 /* Write MAC register values back to h/w defaults */
2678                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2679                 mac_reg &= ~(0xF << 14);
2680                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2681
2682                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2683                 mac_reg &= ~E1000_RCTL_SECRC;
2684                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2685
2686                 ret_val = e1000_read_kmrn_reg_generic(hw,
2687                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2688                                                 &data);
2689                 if (ret_val)
2690                         return ret_val;
2691                 ret_val = e1000_write_kmrn_reg_generic(hw,
2692                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2693                                                 data & ~(1 << 0));
2694                 if (ret_val)
2695                         return ret_val;
2696                 ret_val = e1000_read_kmrn_reg_generic(hw,
2697                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2698                                                 &data);
2699                 if (ret_val)
2700                         return ret_val;
2701                 data &= ~(0xF << 8);
2702                 data |= (0xB << 8);
2703                 ret_val = e1000_write_kmrn_reg_generic(hw,
2704                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2705                                                 data);
2706                 if (ret_val)
2707                         return ret_val;
2708
2709                 /* Write PHY register values back to h/w defaults */
2710                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2711                 data &= ~(0x7F << 5);
2712                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2713                 if (ret_val)
2714                         return ret_val;
2715                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2716                 data |= (1 << 13);
2717                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2718                 if (ret_val)
2719                         return ret_val;
2720                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2721                 data &= ~(0x3FF << 2);
2722                 data |= (0x8 << 2);
2723                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2724                 if (ret_val)
2725                         return ret_val;
2726                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2727                 if (ret_val)
2728                         return ret_val;
2729                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2730                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2731                                                 ~(1 << 10));
2732                 if (ret_val)
2733                         return ret_val;
2734         }
2735
2736         /* re-enable Rx path after enabling/disabling workaround */
2737         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2738                                      ~(1 << 14));
2739 }
2740
2741 /**
2742  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2743  *  done after every PHY reset.
2744  **/
2745 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2746 {
2747         s32 ret_val = E1000_SUCCESS;
2748
2749         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2750
2751         if (hw->mac.type != e1000_pch2lan)
2752                 return E1000_SUCCESS;
2753
2754         /* Set MDIO slow mode before any other MDIO access */
2755         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2756         if (ret_val)
2757                 return ret_val;
2758
2759         ret_val = hw->phy.ops.acquire(hw);
2760         if (ret_val)
2761                 return ret_val;
2762         /* set MSE higher to enable link to stay up when noise is high */
2763         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2764         if (ret_val)
2765                 goto release;
2766         /* drop link after 5 times MSE threshold was reached */
2767         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2768 release:
2769         hw->phy.ops.release(hw);
2770
2771         return ret_val;
2772 }
2773
2774 /**
2775  *  e1000_k1_gig_workaround_lv - K1 Si workaround
2776  *  @hw:   pointer to the HW structure
2777  *
2778  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2779  *  Disable K1 for 1000 and 100 speeds
2780  **/
2781 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2782 {
2783         s32 ret_val = E1000_SUCCESS;
2784         u16 status_reg = 0;
2785
2786         DEBUGFUNC("e1000_k1_workaround_lv");
2787
2788         if (hw->mac.type != e1000_pch2lan)
2789                 return E1000_SUCCESS;
2790
2791         /* Set K1 beacon duration based on 10Mbs speed */
2792         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2793         if (ret_val)
2794                 return ret_val;
2795
2796         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2797             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2798                 if (status_reg &
2799                     (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2800                         u16 pm_phy_reg;
2801
2802                         /* LV 1G/100 Packet drop issue wa  */
2803                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2804                                                        &pm_phy_reg);
2805                         if (ret_val)
2806                                 return ret_val;
2807                         pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2808                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2809                                                         pm_phy_reg);
2810                         if (ret_val)
2811                                 return ret_val;
2812                 } else {
2813                         u32 mac_reg;
2814                         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2815                         mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2816                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2817                         E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2818                 }
2819         }
2820
2821         return ret_val;
2822 }
2823
2824 /**
2825  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2826  *  @hw:   pointer to the HW structure
2827  *  @gate: boolean set to true to gate, false to ungate
2828  *
2829  *  Gate/ungate the automatic PHY configuration via hardware; perform
2830  *  the configuration via software instead.
2831  **/
2832 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2833 {
2834         u32 extcnf_ctrl;
2835
2836         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2837
2838         if (hw->mac.type < e1000_pch2lan)
2839                 return;
2840
2841         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2842
2843         if (gate)
2844                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2845         else
2846                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2847
2848         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2849 }
2850
2851 /**
2852  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2853  *  @hw: pointer to the HW structure
2854  *
2855  *  Check the appropriate indication the MAC has finished configuring the
2856  *  PHY after a software reset.
2857  **/
2858 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2859 {
2860         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2861
2862         DEBUGFUNC("e1000_lan_init_done_ich8lan");
2863
2864         /* Wait for basic configuration completes before proceeding */
2865         do {
2866                 data = E1000_READ_REG(hw, E1000_STATUS);
2867                 data &= E1000_STATUS_LAN_INIT_DONE;
2868                 usec_delay(100);
2869         } while ((!data) && --loop);
2870
2871         /* If basic configuration is incomplete before the above loop
2872          * count reaches 0, loading the configuration from NVM will
2873          * leave the PHY in a bad state possibly resulting in no link.
2874          */
2875         if (loop == 0)
2876                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2877
2878         /* Clear the Init Done bit for the next init event */
2879         data = E1000_READ_REG(hw, E1000_STATUS);
2880         data &= ~E1000_STATUS_LAN_INIT_DONE;
2881         E1000_WRITE_REG(hw, E1000_STATUS, data);
2882 }
2883
2884 /**
2885  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2886  *  @hw: pointer to the HW structure
2887  **/
2888 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2889 {
2890         s32 ret_val = E1000_SUCCESS;
2891         u16 reg;
2892
2893         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2894
2895         if (hw->phy.ops.check_reset_block(hw))
2896                 return E1000_SUCCESS;
2897
2898         /* Allow time for h/w to get to quiescent state after reset */
2899         msec_delay(10);
2900
2901         /* Perform any necessary post-reset workarounds */
2902         switch (hw->mac.type) {
2903         case e1000_pchlan:
2904                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2905                 if (ret_val)
2906                         return ret_val;
2907                 break;
2908         case e1000_pch2lan:
2909                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2910                 if (ret_val)
2911                         return ret_val;
2912                 break;
2913         default:
2914                 break;
2915         }
2916
2917         /* Clear the host wakeup bit after lcd reset */
2918         if (hw->mac.type >= e1000_pchlan) {
2919                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2920                 reg &= ~BM_WUC_HOST_WU_BIT;
2921                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2922         }
2923
2924         /* Configure the LCD with the extended configuration region in NVM */
2925         ret_val = e1000_sw_lcd_config_ich8lan(hw);
2926         if (ret_val)
2927                 return ret_val;
2928
2929         /* Configure the LCD with the OEM bits in NVM */
2930         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2931
2932         if (hw->mac.type == e1000_pch2lan) {
2933                 /* Ungate automatic PHY configuration on non-managed 82579 */
2934                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2935                     E1000_ICH_FWSM_FW_VALID)) {
2936                         msec_delay(10);
2937                         e1000_gate_hw_phy_config_ich8lan(hw, false);
2938                 }
2939
2940                 /* Set EEE LPI Update Timer to 200usec */
2941                 ret_val = hw->phy.ops.acquire(hw);
2942                 if (ret_val)
2943                         return ret_val;
2944                 ret_val = e1000_write_emi_reg_locked(hw,
2945                                                      I82579_LPI_UPDATE_TIMER,
2946                                                      0x1387);
2947                 hw->phy.ops.release(hw);
2948         }
2949
2950         return ret_val;
2951 }
2952
2953 /**
2954  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2955  *  @hw: pointer to the HW structure
2956  *
2957  *  Resets the PHY
2958  *  This is a function pointer entry point called by drivers
2959  *  or other shared routines.
2960  **/
2961 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2962 {
2963         s32 ret_val = E1000_SUCCESS;
2964
2965         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2966
2967         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2968         if ((hw->mac.type == e1000_pch2lan) &&
2969             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2970                 e1000_gate_hw_phy_config_ich8lan(hw, true);
2971
2972         ret_val = e1000_phy_hw_reset_generic(hw);
2973         if (ret_val)
2974                 return ret_val;
2975
2976         return e1000_post_phy_reset_ich8lan(hw);
2977 }
2978
2979 /**
2980  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2981  *  @hw: pointer to the HW structure
2982  *  @active: true to enable LPLU, false to disable
2983  *
2984  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2985  *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2986  *  the phy speed. This function will manually set the LPLU bit and restart
2987  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2988  *  since it configures the same bit.
2989  **/
2990 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2991 {
2992         s32 ret_val;
2993         u16 oem_reg;
2994
2995         DEBUGFUNC("e1000_set_lplu_state_pchlan");
2996         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2997         if (ret_val)
2998                 return ret_val;
2999
3000         if (active)
3001                 oem_reg |= HV_OEM_BITS_LPLU;
3002         else
3003                 oem_reg &= ~HV_OEM_BITS_LPLU;
3004
3005         if (!hw->phy.ops.check_reset_block(hw))
3006                 oem_reg |= HV_OEM_BITS_RESTART_AN;
3007
3008         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3009 }
3010
3011 /**
3012  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3013  *  @hw: pointer to the HW structure
3014  *  @active: true to enable LPLU, false to disable
3015  *
3016  *  Sets the LPLU D0 state according to the active flag.  When
3017  *  activating LPLU this function also disables smart speed
3018  *  and vice versa.  LPLU will not be activated unless the
3019  *  device autonegotiation advertisement meets standards of
3020  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3021  *  This is a function pointer entry point only called by
3022  *  PHY setup routines.
3023  **/
3024 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3025 {
3026         struct e1000_phy_info *phy = &hw->phy;
3027         u32 phy_ctrl;
3028         s32 ret_val = E1000_SUCCESS;
3029         u16 data;
3030
3031         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3032
3033         if (phy->type == e1000_phy_ife)
3034                 return E1000_SUCCESS;
3035
3036         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3037
3038         if (active) {
3039                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3040                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3041
3042                 if (phy->type != e1000_phy_igp_3)
3043                         return E1000_SUCCESS;
3044
3045                 /* Call gig speed drop workaround on LPLU before accessing
3046                  * any PHY registers
3047                  */
3048                 if (hw->mac.type == e1000_ich8lan)
3049                         e1000_gig_downshift_workaround_ich8lan(hw);
3050
3051                 /* When LPLU is enabled, we should disable SmartSpeed */
3052                 ret_val = phy->ops.read_reg(hw,
3053                                             IGP01E1000_PHY_PORT_CONFIG,
3054                                             &data);
3055                 if (ret_val)
3056                         return ret_val;
3057                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3058                 ret_val = phy->ops.write_reg(hw,
3059                                              IGP01E1000_PHY_PORT_CONFIG,
3060                                              data);
3061                 if (ret_val)
3062                         return ret_val;
3063         } else {
3064                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3065                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3066
3067                 if (phy->type != e1000_phy_igp_3)
3068                         return E1000_SUCCESS;
3069
3070                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3071                  * during Dx states where the power conservation is most
3072                  * important.  During driver activity we should enable
3073                  * SmartSpeed, so performance is maintained.
3074                  */
3075                 if (phy->smart_speed == e1000_smart_speed_on) {
3076                         ret_val = phy->ops.read_reg(hw,
3077                                                     IGP01E1000_PHY_PORT_CONFIG,
3078                                                     &data);
3079                         if (ret_val)
3080                                 return ret_val;
3081
3082                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3083                         ret_val = phy->ops.write_reg(hw,
3084                                                      IGP01E1000_PHY_PORT_CONFIG,
3085                                                      data);
3086                         if (ret_val)
3087                                 return ret_val;
3088                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3089                         ret_val = phy->ops.read_reg(hw,
3090                                                     IGP01E1000_PHY_PORT_CONFIG,
3091                                                     &data);
3092                         if (ret_val)
3093                                 return ret_val;
3094
3095                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3096                         ret_val = phy->ops.write_reg(hw,
3097                                                      IGP01E1000_PHY_PORT_CONFIG,
3098                                                      data);
3099                         if (ret_val)
3100                                 return ret_val;
3101                 }
3102         }
3103
3104         return E1000_SUCCESS;
3105 }
3106
3107 /**
3108  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3109  *  @hw: pointer to the HW structure
3110  *  @active: true to enable LPLU, false to disable
3111  *
3112  *  Sets the LPLU D3 state according to the active flag.  When
3113  *  activating LPLU this function also disables smart speed
3114  *  and vice versa.  LPLU will not be activated unless the
3115  *  device autonegotiation advertisement meets standards of
3116  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3117  *  This is a function pointer entry point only called by
3118  *  PHY setup routines.
3119  **/
3120 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3121 {
3122         struct e1000_phy_info *phy = &hw->phy;
3123         u32 phy_ctrl;
3124         s32 ret_val = E1000_SUCCESS;
3125         u16 data;
3126
3127         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3128
3129         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3130
3131         if (!active) {
3132                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3133                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3134
3135                 if (phy->type != e1000_phy_igp_3)
3136                         return E1000_SUCCESS;
3137
3138                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3139                  * during Dx states where the power conservation is most
3140                  * important.  During driver activity we should enable
3141                  * SmartSpeed, so performance is maintained.
3142                  */
3143                 if (phy->smart_speed == e1000_smart_speed_on) {
3144                         ret_val = phy->ops.read_reg(hw,
3145                                                     IGP01E1000_PHY_PORT_CONFIG,
3146                                                     &data);
3147                         if (ret_val)
3148                                 return ret_val;
3149
3150                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3151                         ret_val = phy->ops.write_reg(hw,
3152                                                      IGP01E1000_PHY_PORT_CONFIG,
3153                                                      data);
3154                         if (ret_val)
3155                                 return ret_val;
3156                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3157                         ret_val = phy->ops.read_reg(hw,
3158                                                     IGP01E1000_PHY_PORT_CONFIG,
3159                                                     &data);
3160                         if (ret_val)
3161                                 return ret_val;
3162
3163                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3164                         ret_val = phy->ops.write_reg(hw,
3165                                                      IGP01E1000_PHY_PORT_CONFIG,
3166                                                      data);
3167                         if (ret_val)
3168                                 return ret_val;
3169                 }
3170         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3171                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3172                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3173                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3174                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3175
3176                 if (phy->type != e1000_phy_igp_3)
3177                         return E1000_SUCCESS;
3178
3179                 /* Call gig speed drop workaround on LPLU before accessing
3180                  * any PHY registers
3181                  */
3182                 if (hw->mac.type == e1000_ich8lan)
3183                         e1000_gig_downshift_workaround_ich8lan(hw);
3184
3185                 /* When LPLU is enabled, we should disable SmartSpeed */
3186                 ret_val = phy->ops.read_reg(hw,
3187                                             IGP01E1000_PHY_PORT_CONFIG,
3188                                             &data);
3189                 if (ret_val)
3190                         return ret_val;
3191
3192                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3193                 ret_val = phy->ops.write_reg(hw,
3194                                              IGP01E1000_PHY_PORT_CONFIG,
3195                                              data);
3196         }
3197
3198         return ret_val;
3199 }
3200
3201 /**
3202  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3203  *  @hw: pointer to the HW structure
3204  *  @bank:  pointer to the variable that returns the active bank
3205  *
3206  *  Reads signature byte from the NVM using the flash access registers.
3207  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3208  **/
3209 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3210 {
3211         u32 eecd;
3212         struct e1000_nvm_info *nvm = &hw->nvm;
3213         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3214         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3215         u32 nvm_dword = 0;
3216         u8 sig_byte = 0;
3217         s32 ret_val;
3218
3219         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3220
3221         switch (hw->mac.type) {
3222         case e1000_ich8lan:
3223         case e1000_ich9lan:
3224                 eecd = E1000_READ_REG(hw, E1000_EECD);
3225                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3226                     E1000_EECD_SEC1VAL_VALID_MASK) {
3227                         if (eecd & E1000_EECD_SEC1VAL)
3228                                 *bank = 1;
3229                         else
3230                                 *bank = 0;
3231
3232                         return E1000_SUCCESS;
3233                 }
3234                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3235                 /* fall-thru */
3236         default:
3237                 /* set bank to 0 in case flash read fails */
3238                 *bank = 0;
3239
3240                 /* Check bank 0 */
3241                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3242                                                         &sig_byte);
3243                 if (ret_val)
3244                         return ret_val;
3245                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3246                     E1000_ICH_NVM_SIG_VALUE) {
3247                         *bank = 0;
3248                         return E1000_SUCCESS;
3249                 }
3250
3251                 /* Check bank 1 */
3252                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3253                                                         bank1_offset,
3254                                                         &sig_byte);
3255                 if (ret_val)
3256                         return ret_val;
3257                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3258                     E1000_ICH_NVM_SIG_VALUE) {
3259                         *bank = 1;
3260                         return E1000_SUCCESS;
3261                 }
3262
3263                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3264                 return -E1000_ERR_NVM;
3265         }
3266 }
3267
3268 /**
3269  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3270  *  @hw: pointer to the HW structure
3271  *  @offset: The offset (in bytes) of the word(s) to read.
3272  *  @words: Size of data to read in words
3273  *  @data: Pointer to the word(s) to read at offset.
3274  *
3275  *  Reads a word(s) from the NVM using the flash access registers.
3276  **/
3277 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3278                                   u16 *data)
3279 {
3280         struct e1000_nvm_info *nvm = &hw->nvm;
3281         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3282         u32 act_offset;
3283         s32 ret_val = E1000_SUCCESS;
3284         u32 bank = 0;
3285         u16 i, word;
3286
3287         DEBUGFUNC("e1000_read_nvm_ich8lan");
3288
3289         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3290             (words == 0)) {
3291                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3292                 ret_val = -E1000_ERR_NVM;
3293                 goto out;
3294         }
3295
3296         nvm->ops.acquire(hw);
3297
3298         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3299         if (ret_val != E1000_SUCCESS) {
3300                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3301                 bank = 0;
3302         }
3303
3304         act_offset = (bank) ? nvm->flash_bank_size : 0;
3305         act_offset += offset;
3306
3307         ret_val = E1000_SUCCESS;
3308         for (i = 0; i < words; i++) {
3309                 if (dev_spec->shadow_ram[offset+i].modified) {
3310                         data[i] = dev_spec->shadow_ram[offset+i].value;
3311                 } else {
3312                         ret_val = e1000_read_flash_word_ich8lan(hw,
3313                                                                 act_offset + i,
3314                                                                 &word);
3315                         if (ret_val)
3316                                 break;
3317                         data[i] = word;
3318                 }
3319         }
3320
3321         nvm->ops.release(hw);
3322
3323 out:
3324         if (ret_val)
3325                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3326
3327         return ret_val;
3328 }
3329
3330 /**
3331  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3332  *  @hw: pointer to the HW structure
3333  *
3334  *  This function does initial flash setup so that a new read/write/erase cycle
3335  *  can be started.
3336  **/
3337 STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3338 {
3339         union ich8_hws_flash_status hsfsts;
3340         s32 ret_val = -E1000_ERR_NVM;
3341
3342         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3343
3344         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3345
3346         /* Check if the flash descriptor is valid */
3347         if (!hsfsts.hsf_status.fldesvalid) {
3348                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3349                 return -E1000_ERR_NVM;
3350         }
3351
3352         /* Clear FCERR and DAEL in hw status by writing 1 */
3353         hsfsts.hsf_status.flcerr = 1;
3354         hsfsts.hsf_status.dael = 1;
3355         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3356
3357         /* Either we should have a hardware SPI cycle in progress
3358          * bit to check against, in order to start a new cycle or
3359          * FDONE bit should be changed in the hardware so that it
3360          * is 1 after hardware reset, which can then be used as an
3361          * indication whether a cycle is in progress or has been
3362          * completed.
3363          */
3364
3365         if (!hsfsts.hsf_status.flcinprog) {
3366                 /* There is no cycle running at present,
3367                  * so we can start a cycle.
3368                  * Begin by setting Flash Cycle Done.
3369                  */
3370                 hsfsts.hsf_status.flcdone = 1;
3371                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3372                 ret_val = E1000_SUCCESS;
3373         } else {
3374                 s32 i;
3375
3376                 /* Otherwise poll for sometime so the current
3377                  * cycle has a chance to end before giving up.
3378                  */
3379                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3380                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3381                                                               ICH_FLASH_HSFSTS);
3382                         if (!hsfsts.hsf_status.flcinprog) {
3383                                 ret_val = E1000_SUCCESS;
3384                                 break;
3385                         }
3386                         usec_delay(1);
3387                 }
3388                 if (ret_val == E1000_SUCCESS) {
3389                         /* Successful in waiting for previous cycle to timeout,
3390                          * now set the Flash Cycle Done.
3391                          */
3392                         hsfsts.hsf_status.flcdone = 1;
3393                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3394                                                 hsfsts.regval);
3395                 } else {
3396                         DEBUGOUT("Flash controller busy, cannot get access\n");
3397                 }
3398         }
3399
3400         return ret_val;
3401 }
3402
3403 /**
3404  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3405  *  @hw: pointer to the HW structure
3406  *  @timeout: maximum time to wait for completion
3407  *
3408  *  This function starts a flash cycle and waits for its completion.
3409  **/
3410 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3411 {
3412         union ich8_hws_flash_ctrl hsflctl;
3413         union ich8_hws_flash_status hsfsts;
3414         u32 i = 0;
3415
3416         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3417
3418         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3419         hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3420         hsflctl.hsf_ctrl.flcgo = 1;
3421
3422         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3423
3424         /* wait till FDONE bit is set to 1 */
3425         do {
3426                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3427                 if (hsfsts.hsf_status.flcdone)
3428                         break;
3429                 usec_delay(1);
3430         } while (i++ < timeout);
3431
3432         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3433                 return E1000_SUCCESS;
3434
3435         return -E1000_ERR_NVM;
3436 }
3437
3438 /**
3439  *  e1000_read_flash_word_ich8lan - Read word from flash
3440  *  @hw: pointer to the HW structure
3441  *  @offset: offset to data location
3442  *  @data: pointer to the location for storing the data
3443  *
3444  *  Reads the flash word at offset into data.  Offset is converted
3445  *  to bytes before read.
3446  **/
3447 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3448                                          u16 *data)
3449 {
3450         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3451
3452         if (!data)
3453                 return -E1000_ERR_NVM;
3454
3455         /* Must convert offset into bytes. */
3456         offset <<= 1;
3457
3458         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3459 }
3460
3461 /**
3462  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3463  *  @hw: pointer to the HW structure
3464  *  @offset: The offset of the byte to read.
3465  *  @data: Pointer to a byte to store the value read.
3466  *
3467  *  Reads a single byte from the NVM using the flash access registers.
3468  **/
3469 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3470                                          u8 *data)
3471 {
3472         s32 ret_val;
3473         u16 word = 0;
3474
3475         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3476
3477         if (ret_val)
3478                 return ret_val;
3479
3480         *data = (u8)word;
3481
3482         return E1000_SUCCESS;
3483 }
3484
3485 /**
3486  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3487  *  @hw: pointer to the HW structure
3488  *  @offset: The offset (in bytes) of the byte or word to read.
3489  *  @size: Size of data to read, 1=byte 2=word
3490  *  @data: Pointer to the word to store the value read.
3491  *
3492  *  Reads a byte or word from the NVM using the flash access registers.
3493  **/
3494 STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3495                                          u8 size, u16 *data)
3496 {
3497         union ich8_hws_flash_status hsfsts;
3498         union ich8_hws_flash_ctrl hsflctl;
3499         u32 flash_linear_addr;
3500         u32 flash_data = 0;
3501         s32 ret_val = -E1000_ERR_NVM;
3502         u8 count = 0;
3503
3504         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3505
3506         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3507                 return -E1000_ERR_NVM;
3508         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3509                              hw->nvm.flash_base_addr);
3510
3511         do {
3512                 usec_delay(1);
3513                 /* Steps */
3514                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3515                 if (ret_val != E1000_SUCCESS)
3516                         break;
3517                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3518
3519                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3520                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3521                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3522                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3523                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3524
3525                 ret_val = e1000_flash_cycle_ich8lan(hw,
3526                                                 ICH_FLASH_READ_COMMAND_TIMEOUT);
3527
3528                 /* Check if FCERR is set to 1, if set to 1, clear it
3529                  * and try the whole sequence a few more times, else
3530                  * read in (shift in) the Flash Data0, the order is
3531                  * least significant byte first msb to lsb
3532                  */
3533                 if (ret_val == E1000_SUCCESS) {
3534                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3535                         if (size == 1)
3536                                 *data = (u8)(flash_data & 0x000000FF);
3537                         else if (size == 2)
3538                                 *data = (u16)(flash_data & 0x0000FFFF);
3539                         break;
3540                 } else {
3541                         /* If we've gotten here, then things are probably
3542                          * completely hosed, but if the error condition is
3543                          * detected, it won't hurt to give it another try...
3544                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3545                          */
3546                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3547                                                               ICH_FLASH_HSFSTS);
3548                         if (hsfsts.hsf_status.flcerr) {
3549                                 /* Repeat for some time before giving up. */
3550                                 continue;
3551                         } else if (!hsfsts.hsf_status.flcdone) {
3552                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3553                                 break;
3554                         }
3555                 }
3556         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3557
3558         return ret_val;
3559 }
3560
3561
3562 /**
3563  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3564  *  @hw: pointer to the HW structure
3565  *  @offset: The offset (in bytes) of the word(s) to write.
3566  *  @words: Size of data to write in words
3567  *  @data: Pointer to the word(s) to write at offset.
3568  *
3569  *  Writes a byte or word to the NVM using the flash access registers.
3570  **/
3571 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3572                                    u16 *data)
3573 {
3574         struct e1000_nvm_info *nvm = &hw->nvm;
3575         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3576         u16 i;
3577
3578         DEBUGFUNC("e1000_write_nvm_ich8lan");
3579
3580         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3581             (words == 0)) {
3582                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3583                 return -E1000_ERR_NVM;
3584         }
3585
3586         nvm->ops.acquire(hw);
3587
3588         for (i = 0; i < words; i++) {
3589                 dev_spec->shadow_ram[offset+i].modified = true;
3590                 dev_spec->shadow_ram[offset+i].value = data[i];
3591         }
3592
3593         nvm->ops.release(hw);
3594
3595         return E1000_SUCCESS;
3596 }
3597
3598 /**
3599  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3600  *  @hw: pointer to the HW structure
3601  *
3602  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3603  *  which writes the checksum to the shadow ram.  The changes in the shadow
3604  *  ram are then committed to the EEPROM by processing each bank at a time
3605  *  checking for the modified bit and writing only the pending changes.
3606  *  After a successful commit, the shadow ram is cleared and is ready for
3607  *  future writes.
3608  **/
3609 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3610 {
3611         struct e1000_nvm_info *nvm = &hw->nvm;
3612         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3613         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3614         s32 ret_val;
3615         u16 data = 0;
3616
3617         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3618
3619         ret_val = e1000_update_nvm_checksum_generic(hw);
3620         if (ret_val)
3621                 goto out;
3622
3623         if (nvm->type != e1000_nvm_flash_sw)
3624                 goto out;
3625
3626         nvm->ops.acquire(hw);
3627
3628         /* We're writing to the opposite bank so if we're on bank 1,
3629          * write to bank 0 etc.  We also need to erase the segment that
3630          * is going to be written
3631          */
3632         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3633         if (ret_val != E1000_SUCCESS) {
3634                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3635                 bank = 0;
3636         }
3637
3638         if (bank == 0) {
3639                 new_bank_offset = nvm->flash_bank_size;
3640                 old_bank_offset = 0;
3641                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3642                 if (ret_val)
3643                         goto release;
3644         } else {
3645                 old_bank_offset = nvm->flash_bank_size;
3646                 new_bank_offset = 0;
3647                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3648                 if (ret_val)
3649                         goto release;
3650         }
3651         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3652                 if (dev_spec->shadow_ram[i].modified) {
3653                         data = dev_spec->shadow_ram[i].value;
3654                 } else {
3655                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
3656                                                                 old_bank_offset,
3657                                                                 &data);
3658                         if (ret_val)
3659                                 break;
3660                 }
3661                 /* If the word is 0x13, then make sure the signature bits
3662                  * (15:14) are 11b until the commit has completed.
3663                  * This will allow us to write 10b which indicates the
3664                  * signature is valid.  We want to do this after the write
3665                  * has completed so that we don't mark the segment valid
3666                  * while the write is still in progress
3667                  */
3668                 if (i == E1000_ICH_NVM_SIG_WORD)
3669                         data |= E1000_ICH_NVM_SIG_MASK;
3670
3671                 /* Convert offset to bytes. */
3672                 act_offset = (i + new_bank_offset) << 1;
3673
3674                 usec_delay(100);
3675
3676                 /* Write the bytes to the new bank. */
3677                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3678                                                                act_offset,
3679                                                                (u8)data);
3680                 if (ret_val)
3681                         break;
3682
3683                 usec_delay(100);
3684                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3685                                                           act_offset + 1,
3686                                                           (u8)(data >> 8));
3687                 if (ret_val)
3688                         break;
3689         }
3690
3691         /* Don't bother writing the segment valid bits if sector
3692          * programming failed.
3693          */
3694         if (ret_val) {
3695                 DEBUGOUT("Flash commit failed.\n");
3696                 goto release;
3697         }
3698
3699         /* Finally validate the new segment by setting bit 15:14
3700          * to 10b in word 0x13 , this can be done without an
3701          * erase as well since these bits are 11 to start with
3702          * and we need to change bit 14 to 0b
3703          */
3704         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3705         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3706         if (ret_val)
3707                 goto release;
3708
3709         data &= 0xBFFF;
3710         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
3711                                                        (u8)(data >> 8));
3712         if (ret_val)
3713                 goto release;
3714
3715         /* And invalidate the previously valid segment by setting
3716          * its signature word (0x13) high_byte to 0b. This can be
3717          * done without an erase because flash erase sets all bits
3718          * to 1's. We can write 1's to 0's without an erase
3719          */
3720         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3721
3722         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3723
3724         if (ret_val)
3725                 goto release;
3726
3727         /* Great!  Everything worked, we can now clear the cached entries. */
3728         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3729                 dev_spec->shadow_ram[i].modified = false;
3730                 dev_spec->shadow_ram[i].value = 0xFFFF;
3731         }
3732
3733 release:
3734         nvm->ops.release(hw);
3735
3736         /* Reload the EEPROM, or else modifications will not appear
3737          * until after the next adapter reset.
3738          */
3739         if (!ret_val) {
3740                 nvm->ops.reload(hw);
3741                 msec_delay(10);
3742         }
3743
3744 out:
3745         if (ret_val)
3746                 DEBUGOUT1("NVM update error: %d\n", ret_val);
3747
3748         return ret_val;
3749 }
3750
3751 /**
3752  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3753  *  @hw: pointer to the HW structure
3754  *
3755  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3756  *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
3757  *  calculated, in which case we need to calculate the checksum and set bit 6.
3758  **/
3759 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3760 {
3761         s32 ret_val;
3762         u16 data;
3763         u16 word;
3764         u16 valid_csum_mask;
3765
3766         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3767
3768         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3769          * the checksum needs to be fixed.  This bit is an indication that
3770          * the NVM was prepared by OEM software and did not calculate
3771          * the checksum...a likely scenario.
3772          */
3773         switch (hw->mac.type) {
3774         case e1000_pch_lpt:
3775                 word = NVM_COMPAT;
3776                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3777                 break;
3778         default:
3779                 word = NVM_FUTURE_INIT_WORD1;
3780                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3781                 break;
3782         }
3783
3784         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3785         if (ret_val)
3786                 return ret_val;
3787
3788         if (!(data & valid_csum_mask)) {
3789                 data |= valid_csum_mask;
3790                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3791                 if (ret_val)
3792                         return ret_val;
3793                 ret_val = hw->nvm.ops.update(hw);
3794                 if (ret_val)
3795                         return ret_val;
3796         }
3797
3798         return e1000_validate_nvm_checksum_generic(hw);
3799 }
3800
3801 /**
3802  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3803  *  @hw: pointer to the HW structure
3804  *  @offset: The offset (in bytes) of the byte/word to read.
3805  *  @size: Size of data to read, 1=byte 2=word
3806  *  @data: The byte(s) to write to the NVM.
3807  *
3808  *  Writes one/two bytes to the NVM using the flash access registers.
3809  **/
3810 STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3811                                           u8 size, u16 data)
3812 {
3813         union ich8_hws_flash_status hsfsts;
3814         union ich8_hws_flash_ctrl hsflctl;
3815         u32 flash_linear_addr;
3816         u32 flash_data = 0;
3817         s32 ret_val;
3818         u8 count = 0;
3819
3820         DEBUGFUNC("e1000_write_ich8_data");
3821
3822         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3823                 return -E1000_ERR_NVM;
3824
3825         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3826                              hw->nvm.flash_base_addr);
3827
3828         do {
3829                 usec_delay(1);
3830                 /* Steps */
3831                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3832                 if (ret_val != E1000_SUCCESS)
3833                         break;
3834                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3835
3836                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3837                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3838                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3839                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3840
3841                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3842
3843                 if (size == 1)
3844                         flash_data = (u32)data & 0x00FF;
3845                 else
3846                         flash_data = (u32)data;
3847
3848                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3849
3850                 /* check if FCERR is set to 1 , if set to 1, clear it
3851                  * and try the whole sequence a few more times else done
3852                  */
3853                 ret_val =
3854                     e1000_flash_cycle_ich8lan(hw,
3855                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3856                 if (ret_val == E1000_SUCCESS)
3857                         break;
3858
3859                 /* If we're here, then things are most likely
3860                  * completely hosed, but if the error condition
3861                  * is detected, it won't hurt to give it another
3862                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3863                  */
3864                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3865                 if (hsfsts.hsf_status.flcerr)
3866                         /* Repeat for some time before giving up. */
3867                         continue;
3868                 if (!hsfsts.hsf_status.flcdone) {
3869                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3870                         break;
3871                 }
3872         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3873
3874         return ret_val;
3875 }
3876
3877
3878 /**
3879  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3880  *  @hw: pointer to the HW structure
3881  *  @offset: The index of the byte to read.
3882  *  @data: The byte to write to the NVM.
3883  *
3884  *  Writes a single byte to the NVM using the flash access registers.
3885  **/
3886 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3887                                           u8 data)
3888 {
3889         u16 word = (u16)data;
3890
3891         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3892
3893         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3894 }
3895
3896
3897
3898 /**
3899  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3900  *  @hw: pointer to the HW structure
3901  *  @offset: The offset of the byte to write.
3902  *  @byte: The byte to write to the NVM.
3903  *
3904  *  Writes a single byte to the NVM using the flash access registers.
3905  *  Goes through a retry algorithm before giving up.
3906  **/
3907 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3908                                                 u32 offset, u8 byte)
3909 {
3910         s32 ret_val;
3911         u16 program_retries;
3912
3913         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3914
3915         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3916         if (!ret_val)
3917                 return ret_val;
3918
3919         for (program_retries = 0; program_retries < 100; program_retries++) {
3920                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3921                 usec_delay(100);
3922                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3923                 if (ret_val == E1000_SUCCESS)
3924                         break;
3925         }
3926         if (program_retries == 100)
3927                 return -E1000_ERR_NVM;
3928
3929         return E1000_SUCCESS;
3930 }
3931
3932 /**
3933  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3934  *  @hw: pointer to the HW structure
3935  *  @bank: 0 for first bank, 1 for second bank, etc.
3936  *
3937  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3938  *  bank N is 4096 * N + flash_reg_addr.
3939  **/
3940 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3941 {
3942         struct e1000_nvm_info *nvm = &hw->nvm;
3943         union ich8_hws_flash_status hsfsts;
3944         union ich8_hws_flash_ctrl hsflctl;
3945         u32 flash_linear_addr;
3946         /* bank size is in 16bit words - adjust to bytes */
3947         u32 flash_bank_size = nvm->flash_bank_size * 2;
3948         s32 ret_val;
3949         s32 count = 0;
3950         s32 j, iteration, sector_size;
3951
3952         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3953
3954         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3955
3956         /* Determine HW Sector size: Read BERASE bits of hw flash status
3957          * register
3958          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3959          *     consecutive sectors.  The start index for the nth Hw sector
3960          *     can be calculated as = bank * 4096 + n * 256
3961          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3962          *     The start index for the nth Hw sector can be calculated
3963          *     as = bank * 4096
3964          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3965          *     (ich9 only, otherwise error condition)
3966          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3967          */
3968         switch (hsfsts.hsf_status.berasesz) {
3969         case 0:
3970                 /* Hw sector size 256 */
3971                 sector_size = ICH_FLASH_SEG_SIZE_256;
3972                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3973                 break;
3974         case 1:
3975                 sector_size = ICH_FLASH_SEG_SIZE_4K;
3976                 iteration = 1;
3977                 break;
3978         case 2:
3979                 sector_size = ICH_FLASH_SEG_SIZE_8K;
3980                 iteration = 1;
3981                 break;
3982         case 3:
3983                 sector_size = ICH_FLASH_SEG_SIZE_64K;
3984                 iteration = 1;
3985                 break;
3986         default:
3987                 return -E1000_ERR_NVM;
3988         }
3989
3990         /* Start with the base address, then add the sector offset. */
3991         flash_linear_addr = hw->nvm.flash_base_addr;
3992         flash_linear_addr += (bank) ? flash_bank_size : 0;
3993
3994         for (j = 0; j < iteration; j++) {
3995                 do {
3996                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3997
3998                         /* Steps */
3999                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
4000                         if (ret_val)
4001                                 return ret_val;
4002
4003                         /* Write a value 11 (block Erase) in Flash
4004                          * Cycle field in hw flash control
4005                          */
4006                         hsflctl.regval =
4007                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4008
4009                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4010                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4011                                                 hsflctl.regval);
4012
4013                         /* Write the last 24 bits of an index within the
4014                          * block into Flash Linear address field in Flash
4015                          * Address.
4016                          */
4017                         flash_linear_addr += (j * sector_size);
4018                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4019                                               flash_linear_addr);
4020
4021                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4022                         if (ret_val == E1000_SUCCESS)
4023                                 break;
4024
4025                         /* Check if FCERR is set to 1.  If 1,
4026                          * clear it and try the whole sequence
4027                          * a few more times else Done
4028                          */
4029                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4030                                                       ICH_FLASH_HSFSTS);
4031                         if (hsfsts.hsf_status.flcerr)
4032                                 /* repeat for some time before giving up */
4033                                 continue;
4034                         else if (!hsfsts.hsf_status.flcdone)
4035                                 return ret_val;
4036                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4037         }
4038
4039         return E1000_SUCCESS;
4040 }
4041
4042 /**
4043  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4044  *  @hw: pointer to the HW structure
4045  *  @data: Pointer to the LED settings
4046  *
4047  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4048  *  settings is all 0's or F's, set the LED default to a valid LED default
4049  *  setting.
4050  **/
4051 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4052 {
4053         s32 ret_val;
4054
4055         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4056
4057         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4058         if (ret_val) {
4059                 DEBUGOUT("NVM Read Error\n");
4060                 return ret_val;
4061         }
4062
4063         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4064                 *data = ID_LED_DEFAULT_ICH8LAN;
4065
4066         return E1000_SUCCESS;
4067 }
4068
4069 /**
4070  *  e1000_id_led_init_pchlan - store LED configurations
4071  *  @hw: pointer to the HW structure
4072  *
4073  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4074  *  the PHY LED configuration register.
4075  *
4076  *  PCH also does not have an "always on" or "always off" mode which
4077  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4078  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4079  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4080  *  link based on logic in e1000_led_[on|off]_pchlan().
4081  **/
4082 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4083 {
4084         struct e1000_mac_info *mac = &hw->mac;
4085         s32 ret_val;
4086         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4087         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4088         u16 data, i, temp, shift;
4089
4090         DEBUGFUNC("e1000_id_led_init_pchlan");
4091
4092         /* Get default ID LED modes */
4093         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4094         if (ret_val)
4095                 return ret_val;
4096
4097         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4098         mac->ledctl_mode1 = mac->ledctl_default;
4099         mac->ledctl_mode2 = mac->ledctl_default;
4100
4101         for (i = 0; i < 4; i++) {
4102                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4103                 shift = (i * 5);
4104                 switch (temp) {
4105                 case ID_LED_ON1_DEF2:
4106                 case ID_LED_ON1_ON2:
4107                 case ID_LED_ON1_OFF2:
4108                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4109                         mac->ledctl_mode1 |= (ledctl_on << shift);
4110                         break;
4111                 case ID_LED_OFF1_DEF2:
4112                 case ID_LED_OFF1_ON2:
4113                 case ID_LED_OFF1_OFF2:
4114                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4115                         mac->ledctl_mode1 |= (ledctl_off << shift);
4116                         break;
4117                 default:
4118                         /* Do nothing */
4119                         break;
4120                 }
4121                 switch (temp) {
4122                 case ID_LED_DEF1_ON2:
4123                 case ID_LED_ON1_ON2:
4124                 case ID_LED_OFF1_ON2:
4125                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4126                         mac->ledctl_mode2 |= (ledctl_on << shift);
4127                         break;
4128                 case ID_LED_DEF1_OFF2:
4129                 case ID_LED_ON1_OFF2:
4130                 case ID_LED_OFF1_OFF2:
4131                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4132                         mac->ledctl_mode2 |= (ledctl_off << shift);
4133                         break;
4134                 default:
4135                         /* Do nothing */
4136                         break;
4137                 }
4138         }
4139
4140         return E1000_SUCCESS;
4141 }
4142
4143 /**
4144  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4145  *  @hw: pointer to the HW structure
4146  *
4147  *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4148  *  register, so the the bus width is hard coded.
4149  **/
4150 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4151 {
4152         struct e1000_bus_info *bus = &hw->bus;
4153         s32 ret_val;
4154
4155         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4156
4157         ret_val = e1000_get_bus_info_pcie_generic(hw);
4158
4159         /* ICH devices are "PCI Express"-ish.  They have
4160          * a configuration space, but do not contain
4161          * PCI Express Capability registers, so bus width
4162          * must be hardcoded.
4163          */
4164         if (bus->width == e1000_bus_width_unknown)
4165                 bus->width = e1000_bus_width_pcie_x1;
4166
4167         return ret_val;
4168 }
4169
4170 /**
4171  *  e1000_reset_hw_ich8lan - Reset the hardware
4172  *  @hw: pointer to the HW structure
4173  *
4174  *  Does a full reset of the hardware which includes a reset of the PHY and
4175  *  MAC.
4176  **/
4177 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4178 {
4179         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4180         u16 kum_cfg;
4181         u32 ctrl, reg;
4182         s32 ret_val;
4183
4184         DEBUGFUNC("e1000_reset_hw_ich8lan");
4185
4186         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4187          * on the last TLP read/write transaction when MAC is reset.
4188          */
4189         ret_val = e1000_disable_pcie_master_generic(hw);
4190         if (ret_val)
4191                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4192
4193         DEBUGOUT("Masking off all interrupts\n");
4194         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4195
4196         /* Disable the Transmit and Receive units.  Then delay to allow
4197          * any pending transactions to complete before we hit the MAC
4198          * with the global reset.
4199          */
4200         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4201         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4202         E1000_WRITE_FLUSH(hw);
4203
4204         msec_delay(10);
4205
4206         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4207         if (hw->mac.type == e1000_ich8lan) {
4208                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4209                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4210                 /* Set Packet Buffer Size to 16k. */
4211                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4212         }
4213
4214         if (hw->mac.type == e1000_pchlan) {
4215                 /* Save the NVM K1 bit setting*/
4216                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4217                 if (ret_val)
4218                         return ret_val;
4219
4220                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4221                         dev_spec->nvm_k1_enabled = true;
4222                 else
4223                         dev_spec->nvm_k1_enabled = false;
4224         }
4225
4226         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4227
4228         if (!hw->phy.ops.check_reset_block(hw)) {
4229                 /* Full-chip reset requires MAC and PHY reset at the same
4230                  * time to make sure the interface between MAC and the
4231                  * external PHY is reset.
4232                  */
4233                 ctrl |= E1000_CTRL_PHY_RST;
4234
4235                 /* Gate automatic PHY configuration by hardware on
4236                  * non-managed 82579
4237                  */
4238                 if ((hw->mac.type == e1000_pch2lan) &&
4239                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4240                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4241         }
4242         ret_val = e1000_acquire_swflag_ich8lan(hw);
4243         DEBUGOUT("Issuing a global reset to ich8lan\n");
4244         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4245         /* cannot issue a flush here because it hangs the hardware */
4246         msec_delay(20);
4247
4248         /* Set Phy Config Counter to 50msec */
4249         if (hw->mac.type == e1000_pch2lan) {
4250                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4251                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4252                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4253                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4254         }
4255
4256         if (!ret_val)
4257                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4258
4259         if (ctrl & E1000_CTRL_PHY_RST) {
4260                 ret_val = hw->phy.ops.get_cfg_done(hw);
4261                 if (ret_val)
4262                         return ret_val;
4263
4264                 ret_val = e1000_post_phy_reset_ich8lan(hw);
4265                 if (ret_val)
4266                         return ret_val;
4267         }
4268
4269         /* For PCH, this write will make sure that any noise
4270          * will be detected as a CRC error and be dropped rather than show up
4271          * as a bad packet to the DMA engine.
4272          */
4273         if (hw->mac.type == e1000_pchlan)
4274                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4275
4276         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4277         E1000_READ_REG(hw, E1000_ICR);
4278
4279         reg = E1000_READ_REG(hw, E1000_KABGTXD);
4280         reg |= E1000_KABGTXD_BGSQLBIAS;
4281         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4282
4283         return E1000_SUCCESS;
4284 }
4285
4286 /**
4287  *  e1000_init_hw_ich8lan - Initialize the hardware
4288  *  @hw: pointer to the HW structure
4289  *
4290  *  Prepares the hardware for transmit and receive by doing the following:
4291  *   - initialize hardware bits
4292  *   - initialize LED identification
4293  *   - setup receive address registers
4294  *   - setup flow control
4295  *   - setup transmit descriptors
4296  *   - clear statistics
4297  **/
4298 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4299 {
4300         struct e1000_mac_info *mac = &hw->mac;
4301         u32 ctrl_ext, txdctl, snoop;
4302         s32 ret_val;
4303         u16 i;
4304
4305         DEBUGFUNC("e1000_init_hw_ich8lan");
4306
4307         e1000_initialize_hw_bits_ich8lan(hw);
4308
4309         /* Initialize identification LED */
4310         ret_val = mac->ops.id_led_init(hw);
4311         /* An error is not fatal and we should not stop init due to this */
4312         if (ret_val)
4313                 DEBUGOUT("Error initializing identification LED\n");
4314
4315         /* Setup the receive address. */
4316         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4317
4318         /* Zero out the Multicast HASH table */
4319         DEBUGOUT("Zeroing the MTA\n");
4320         for (i = 0; i < mac->mta_reg_count; i++)
4321                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4322
4323         /* The 82578 Rx buffer will stall if wakeup is enabled in host and
4324          * the ME.  Disable wakeup by clearing the host wakeup bit.
4325          * Reset the phy after disabling host wakeup to reset the Rx buffer.
4326          */
4327         if (hw->phy.type == e1000_phy_82578) {
4328                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4329                 i &= ~BM_WUC_HOST_WU_BIT;
4330                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4331                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4332                 if (ret_val)
4333                         return ret_val;
4334         }
4335
4336         /* Setup link and flow control */
4337         ret_val = mac->ops.setup_link(hw);
4338
4339         /* Set the transmit descriptor write-back policy for both queues */
4340         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4341         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4342                   E1000_TXDCTL_FULL_TX_DESC_WB);
4343         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4344                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4345         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4346         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4347         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4348                   E1000_TXDCTL_FULL_TX_DESC_WB);
4349         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4350                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4351         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4352
4353         /* ICH8 has opposite polarity of no_snoop bits.
4354          * By default, we should use snoop behavior.
4355          */
4356         if (mac->type == e1000_ich8lan)
4357                 snoop = PCIE_ICH8_SNOOP_ALL;
4358         else
4359                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4360         e1000_set_pcie_no_snoop_generic(hw, snoop);
4361
4362         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4363         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4364         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4365
4366         /* Clear all of the statistics registers (clear on read).  It is
4367          * important that we do this after we have tried to establish link
4368          * because the symbol error count will increment wildly if there
4369          * is no link.
4370          */
4371         e1000_clear_hw_cntrs_ich8lan(hw);
4372
4373         return ret_val;
4374 }
4375
4376 /**
4377  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4378  *  @hw: pointer to the HW structure
4379  *
4380  *  Sets/Clears required hardware bits necessary for correctly setting up the
4381  *  hardware for transmit and receive.
4382  **/
4383 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4384 {
4385         u32 reg;
4386
4387         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4388
4389         /* Extended Device Control */
4390         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4391         reg |= (1 << 22);
4392         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4393         if (hw->mac.type >= e1000_pchlan)
4394                 reg |= E1000_CTRL_EXT_PHYPDEN;
4395         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4396
4397         /* Transmit Descriptor Control 0 */
4398         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4399         reg |= (1 << 22);
4400         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4401
4402         /* Transmit Descriptor Control 1 */
4403         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4404         reg |= (1 << 22);
4405         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4406
4407         /* Transmit Arbitration Control 0 */
4408         reg = E1000_READ_REG(hw, E1000_TARC(0));
4409         if (hw->mac.type == e1000_ich8lan)
4410                 reg |= (1 << 28) | (1 << 29);
4411         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4412         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4413
4414         /* Transmit Arbitration Control 1 */
4415         reg = E1000_READ_REG(hw, E1000_TARC(1));
4416         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4417                 reg &= ~(1 << 28);
4418         else
4419                 reg |= (1 << 28);
4420         reg |= (1 << 24) | (1 << 26) | (1 << 30);
4421         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4422
4423         /* Device Status */
4424         if (hw->mac.type == e1000_ich8lan) {
4425                 reg = E1000_READ_REG(hw, E1000_STATUS);
4426                 reg &= ~(1 << 31);
4427                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4428         }
4429
4430         /* work-around descriptor data corruption issue during nfs v2 udp
4431          * traffic, just disable the nfs filtering capability
4432          */
4433         reg = E1000_READ_REG(hw, E1000_RFCTL);
4434         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4435
4436         /* Disable IPv6 extension header parsing because some malformed
4437          * IPv6 headers can hang the Rx.
4438          */
4439         if (hw->mac.type == e1000_ich8lan)
4440                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4441         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4442
4443         /* Enable ECC on Lynxpoint */
4444         if (hw->mac.type == e1000_pch_lpt) {
4445                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4446                 reg |= E1000_PBECCSTS_ECC_ENABLE;
4447                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4448
4449                 reg = E1000_READ_REG(hw, E1000_CTRL);
4450                 reg |= E1000_CTRL_MEHE;
4451                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4452         }
4453
4454         return;
4455 }
4456
4457 /**
4458  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4459  *  @hw: pointer to the HW structure
4460  *
4461  *  Determines which flow control settings to use, then configures flow
4462  *  control.  Calls the appropriate media-specific link configuration
4463  *  function.  Assuming the adapter has a valid link partner, a valid link
4464  *  should be established.  Assumes the hardware has previously been reset
4465  *  and the transmitter and receiver are not enabled.
4466  **/
4467 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4468 {
4469         s32 ret_val;
4470
4471         DEBUGFUNC("e1000_setup_link_ich8lan");
4472
4473         if (hw->phy.ops.check_reset_block(hw))
4474                 return E1000_SUCCESS;
4475
4476         /* ICH parts do not have a word in the NVM to determine
4477          * the default flow control setting, so we explicitly
4478          * set it to full.
4479          */
4480         if (hw->fc.requested_mode == e1000_fc_default)
4481                 hw->fc.requested_mode = e1000_fc_full;
4482
4483         /* Save off the requested flow control mode for use later.  Depending
4484          * on the link partner's capabilities, we may or may not use this mode.
4485          */
4486         hw->fc.current_mode = hw->fc.requested_mode;
4487
4488         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4489                 hw->fc.current_mode);
4490
4491         /* Continue to configure the copper link. */
4492         ret_val = hw->mac.ops.setup_physical_interface(hw);
4493         if (ret_val)
4494                 return ret_val;
4495
4496         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4497         if ((hw->phy.type == e1000_phy_82578) ||
4498             (hw->phy.type == e1000_phy_82579) ||
4499             (hw->phy.type == e1000_phy_i217) ||
4500             (hw->phy.type == e1000_phy_82577)) {
4501                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4502
4503                 ret_val = hw->phy.ops.write_reg(hw,
4504                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4505                                              hw->fc.pause_time);
4506                 if (ret_val)
4507                         return ret_val;
4508         }
4509
4510         return e1000_set_fc_watermarks_generic(hw);
4511 }
4512
4513 /**
4514  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4515  *  @hw: pointer to the HW structure
4516  *
4517  *  Configures the kumeran interface to the PHY to wait the appropriate time
4518  *  when polling the PHY, then call the generic setup_copper_link to finish
4519  *  configuring the copper link.
4520  **/
STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_setup_copper_link_ich8lan");

	/* Set link up and clear the forced-speed/forced-duplex bits so
	 * speed and duplex come from auto-negotiation.
	 */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Set the mac to wait the maximum time between each iteration
	 * and increase the max iterations when polling the phy;
	 * this fixes erroneous timeouts at 10Mbps.
	 */
	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
					       0xFFFF);
	if (ret_val)
		return ret_val;
	/* Read-modify-write the Kumeran inband parameter register,
	 * setting the low six bits.
	 */
	ret_val = e1000_read_kmrn_reg_generic(hw,
					      E1000_KMRNCTRLSTA_INBAND_PARAM,
					      &reg_data);
	if (ret_val)
		return ret_val;
	reg_data |= 0x3F;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_INBAND_PARAM,
					       reg_data);
	if (ret_val)
		return ret_val;

	/* PHY-family-specific setup before the generic copper-link setup */
	switch (hw->phy.type) {
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_bm:
	case e1000_phy_82578:
		ret_val = e1000_copper_link_setup_m88(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_82577:
	case e1000_phy_82579:
		ret_val = e1000_copper_link_setup_82577(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_ife:
		/* IFE PHY: program MDI/MDI-X crossover per hw->phy.mdix */
		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
					       &reg_data);
		if (ret_val)
			return ret_val;

		reg_data &= ~IFE_PMC_AUTO_MDIX;

		switch (hw->phy.mdix) {
		case 1:
			/* 1 = force MDI (clear the force-MDI-X bit) */
			reg_data &= ~IFE_PMC_FORCE_MDIX;
			break;
		case 2:
			/* 2 = force MDI-X */
			reg_data |= IFE_PMC_FORCE_MDIX;
			break;
		case 0:
		default:
			/* 0/other = automatic crossover */
			reg_data |= IFE_PMC_AUTO_MDIX;
			break;
		}
		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
						reg_data);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	return e1000_setup_copper_link_generic(hw);
}
4603
4604 /**
4605  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4606  *  @hw: pointer to the HW structure
4607  *
4608  *  Calls the PHY specific link setup function and then calls the
4609  *  generic setup_copper_link to finish configuring the link for
4610  *  Lynxpoint PCH devices
4611  **/
4612 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4613 {
4614         u32 ctrl;
4615         s32 ret_val;
4616
4617         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4618
4619         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4620         ctrl |= E1000_CTRL_SLU;
4621         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4622         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4623
4624         ret_val = e1000_copper_link_setup_82577(hw);
4625         if (ret_val)
4626                 return ret_val;
4627
4628         return e1000_setup_copper_link_generic(hw);
4629 }
4630
4631 /**
4632  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4633  *  @hw: pointer to the HW structure
4634  *  @speed: pointer to store current link speed
4635  *  @duplex: pointer to store the current link duplex
4636  *
4637  *  Calls the generic get_speed_and_duplex to retrieve the current link
4638  *  information and then calls the Kumeran lock loss workaround for links at
4639  *  gigabit speeds.
4640  **/
4641 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4642                                           u16 *duplex)
4643 {
4644         s32 ret_val;
4645
4646         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4647
4648         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4649         if (ret_val)
4650                 return ret_val;
4651
4652         if ((hw->mac.type == e1000_ich8lan) &&
4653             (hw->phy.type == e1000_phy_igp_3) &&
4654             (*speed == SPEED_1000)) {
4655                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4656         }
4657
4658         return ret_val;
4659 }
4660
4661 /**
4662  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4663  *  @hw: pointer to the HW structure
4664  *
4665  *  Work-around for 82566 Kumeran PCS lock loss:
4666  *  On link status change (i.e. PCI reset, speed change) and link is up and
4667  *  speed is gigabit-
4668  *    0) if workaround is optionally disabled do nothing
4669  *    1) wait 1ms for Kumeran link to come up
4670  *    2) check Kumeran Diagnostic register PCS lock loss bit
4671  *    3) if not set the link is locked (all is good), otherwise...
4672  *    4) reset the PHY
4673  *    5) repeat up to 10 times
4674  *  Note: this is only called for IGP3 copper when speed is 1gb.
4675  **/
4676 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4677 {
4678         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4679         u32 phy_ctrl;
4680         s32 ret_val;
4681         u16 i, data;
4682         bool link;
4683
4684         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4685
4686         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4687                 return E1000_SUCCESS;
4688
4689         /* Make sure link is up before proceeding.  If not just return.
4690          * Attempting this while link is negotiating fouled up link
4691          * stability
4692          */
4693         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4694         if (!link)
4695                 return E1000_SUCCESS;
4696
4697         for (i = 0; i < 10; i++) {
4698                 /* read once to clear */
4699                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4700                 if (ret_val)
4701                         return ret_val;
4702                 /* and again to get new status */
4703                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4704                 if (ret_val)
4705                         return ret_val;
4706
4707                 /* check for PCS lock */
4708                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4709                         return E1000_SUCCESS;
4710
4711                 /* Issue PHY reset */
4712                 hw->phy.ops.reset(hw);
4713                 msec_delay_irq(5);
4714         }
4715         /* Disable GigE link negotiation */
4716         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4717         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4718                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4719         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4720
4721         /* Call gig speed drop workaround on Gig disable before accessing
4722          * any PHY registers
4723          */
4724         e1000_gig_downshift_workaround_ich8lan(hw);
4725
4726         /* unable to acquire PCS lock */
4727         return -E1000_ERR_PHY;
4728 }
4729
4730 /**
4731  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4732  *  @hw: pointer to the HW structure
4733  *  @state: boolean value used to set the current Kumeran workaround state
4734  *
4735  *  If ICH8, set the current Kumeran workaround state (enabled - true
4736  *  /disabled - false).
4737  **/
4738 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4739                                                  bool state)
4740 {
4741         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4742
4743         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4744
4745         if (hw->mac.type != e1000_ich8lan) {
4746                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4747                 return;
4748         }
4749
4750         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4751
4752         return;
4753 }
4754
4755 /**
4756  *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4757  *  @hw: pointer to the HW structure
4758  *
4759  *  Workaround for 82566 power-down on D3 entry:
4760  *    1) disable gigabit link
4761  *    2) write VR power-down enable
4762  *    3) read it back
4763  *  Continue if successful, else issue LCD reset and repeat
4764  **/
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8  retry = 0;

	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");

	/* Workaround only applies to the IGP3 PHY */
	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);

		/* Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable; PHY access errors are
		 * deliberately ignored here since the read-back below
		 * verifies the result.
		 */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test; also stop after the second
		 * attempt regardless of the outcome.
		 */
		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}
4808
4809 /**
4810  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4811  *  @hw: pointer to the HW structure
4812  *
4813  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
4814  *  LPLU, Gig disable, MDIC PHY reset):
4815  *    1) Set Kumeran Near-end loopback
4816  *    2) Clear Kumeran Near-end loopback
4817  *  Should only be called for ICH8[m] devices with any 1G Phy.
4818  **/
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");

	/* Applies only to ICH8 parts that do not use the IFE PHY */
	if ((hw->mac.type != e1000_ich8lan) ||
	    (hw->phy.type == e1000_phy_ife))
		return;

	/* Pulse the Kumeran near-end loopback bit: set it, then clear it.
	 * This is best-effort; errors simply abort the workaround.
	 */
	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					      &reg_data);
	if (ret_val)
		return;
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
					       reg_data);
	if (ret_val)
		return;
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
				     reg_data);
}
4844
4845 /**
4846  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4847  *  @hw: pointer to the HW structure
4848  *
4849  *  During S0 to Sx transition, it is possible the link remains at gig
4850  *  instead of negotiating to a lower speed.  Before going to Sx, set
4851  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4852  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4853  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4854  *  needs to be written.
4855  *  Parts that support (and are linked to a partner which support) EEE in
4856  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4857  *  than 10Mbps w/o EEE.
4858  **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");

	/* Default: disable gigabit so the link renegotiates to a lower
	 * speed while in Sx; written back to PHY_CTRL at 'out' below.
	 */
	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg, device_id = hw->device_id;

		/* On these I218 parts, drop the PLL clock request bit
		 * before entering Sx.
		 */
		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
		    (device_id == E1000_DEV_ID_PCH_I218_V3)) {
			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);

			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
		}

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;

		if (!dev_spec->eee_disable) {
			u16 eee_advert;

			ret_val =
			    e1000_read_emi_reg_locked(hw,
						      I217_EEE_ADVERTISEMENT,
						      &eee_advert);
			if (ret_val)
				goto release;

			/* Disable LPLU if both link partners support 100BaseT
			 * EEE and 100Full is advertised on both ends of the
			 * link, and enable Auto Enable LPI since there will
			 * be no driver to enable LPI while in Sx.
			 */
			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
			    (dev_spec->eee_lp_ability &
			     I82579_EEE_100_SUPPORTED) &&
			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
					      E1000_PHY_CTRL_NOND0A_LPLU);

				/* Set Auto Enable LPI after link up */
				hw->phy.ops.read_reg_locked(hw,
							    I217_LPI_GPIO_CTRL,
							    &phy_reg);
				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
				hw->phy.ops.write_reg_locked(hw,
							     I217_LPI_GPIO_CTRL,
							     phy_reg);
			}
		}

		/* For i217 Intel Rapid Start Technology support,
		 * when the system is going into Sx and no manageability engine
		 * is present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset only
		 * on power good, as well as the MTA (Multicast table array).
		 * The SMBus release must also be disabled on LCD reset.
		 */
		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		      E1000_ICH_FWSM_FW_VALID)) {
			/* Enable proxy to reset only on power good. */
			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
						    &phy_reg);
			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
						     phy_reg);

			/* Set bit enable LPI (EEE) to reset only on
			 * power good.
			 */
			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);

			/* Disable the SMB release on LCD reset. */
			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
		}

		/* Enable MTA to reset for Intel Rapid Start Technology
		 * Support
		 */
		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);

release:
		hw->phy.ops.release(hw);
	}
out:
	/* Commit the (possibly adjusted) GbE-disable/LPLU settings */
	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);

	if (hw->mac.type == e1000_ich8lan)
		e1000_gig_downshift_workaround_ich8lan(hw);

	if (hw->mac.type >= e1000_pchlan) {
		e1000_oem_bits_config_ich8lan(hw, false);

		/* Reset PHY to activate OEM bits on 82577/8 */
		if (hw->mac.type == e1000_pchlan)
			e1000_phy_hw_reset_generic(hw);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}

	return;
}
4981
4982 /**
4983  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4984  *  @hw: pointer to the HW structure
4985  *
4986  *  During Sx to S0 transitions on non-managed devices or managed devices
4987  *  on which PHY resets are not blocked, if the PHY registers cannot be
4988  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
4989  *  the PHY.
4990  *  On i217, setup Intel Rapid Start Technology.
4991  **/
/* NOTE(review): return type is u32 but the function propagates s32 error
 * codes (negative values become large u32); callers appear to rely on
 * 0 == E1000_SUCCESS only — confirm before changing.
 */
u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_resume_workarounds_pchlan");
	/* Nothing to do for parts older than PCH2 */
	if (hw->mac.type < e1000_pch2lan)
		return E1000_SUCCESS;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val) {
		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
		return ret_val;
	}

	/* For i217 Intel Rapid Start Technology support when the system
	 * is transitioning from Sx and no manageability engine is present
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			DEBUGOUT("Failed to setup iRST\n");
			return ret_val;
		}

		/* Clear Auto Enable LPI after link up */
		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);

		if (!(E1000_READ_REG(hw, E1000_FWSM) &
		    E1000_ICH_FWSM_FW_VALID)) {
			/* Restore clear on SMB if no manageability engine
			 * is present
			 */
			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
							      &phy_reg);
			if (ret_val)
				goto release;
			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);

			/* Disable Proxy */
			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
		}
		/* Enable reset on MTA */
		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
						      &phy_reg);
		if (ret_val)
			goto release;
		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
release:
		/* Single exit for the i217 path: log, drop the PHY
		 * semaphore, and propagate any error.
		 */
		if (ret_val)
			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
		hw->phy.ops.release(hw);
		return ret_val;
	}
	return E1000_SUCCESS;
}
5055
5056 /**
5057  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5058  *  @hw: pointer to the HW structure
5059  *
5060  *  Return the LED back to the default configuration.
5061  **/
5062 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5063 {
5064         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5065
5066         if (hw->phy.type == e1000_phy_ife)
5067                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5068                                              0);
5069
5070         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5071         return E1000_SUCCESS;
5072 }
5073
5074 /**
5075  *  e1000_led_on_ich8lan - Turn LEDs on
5076  *  @hw: pointer to the HW structure
5077  *
5078  *  Turn on the LEDs.
5079  **/
5080 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5081 {
5082         DEBUGFUNC("e1000_led_on_ich8lan");
5083
5084         if (hw->phy.type == e1000_phy_ife)
5085                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5086                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5087
5088         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5089         return E1000_SUCCESS;
5090 }
5091
5092 /**
5093  *  e1000_led_off_ich8lan - Turn LEDs off
5094  *  @hw: pointer to the HW structure
5095  *
5096  *  Turn off the LEDs.
5097  **/
5098 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5099 {
5100         DEBUGFUNC("e1000_led_off_ich8lan");
5101
5102         if (hw->phy.type == e1000_phy_ife)
5103                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5104                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5105
5106         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5107         return E1000_SUCCESS;
5108 }
5109
5110 /**
5111  *  e1000_setup_led_pchlan - Configures SW controllable LED
5112  *  @hw: pointer to the HW structure
5113  *
5114  *  This prepares the SW controllable LED for use.
5115  **/
5116 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5117 {
5118         DEBUGFUNC("e1000_setup_led_pchlan");
5119
5120         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5121                                      (u16)hw->mac.ledctl_mode1);
5122 }
5123
5124 /**
5125  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5126  *  @hw: pointer to the HW structure
5127  *
5128  *  Return the LED back to the default configuration.
5129  **/
5130 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5131 {
5132         DEBUGFUNC("e1000_cleanup_led_pchlan");
5133
5134         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5135                                      (u16)hw->mac.ledctl_default);
5136 }
5137
5138 /**
5139  *  e1000_led_on_pchlan - Turn LEDs on
5140  *  @hw: pointer to the HW structure
5141  *
5142  *  Turn on the LEDs.
5143  **/
5144 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5145 {
5146         u16 data = (u16)hw->mac.ledctl_mode2;
5147         u32 i, led;
5148
5149         DEBUGFUNC("e1000_led_on_pchlan");
5150
5151         /* If no link, then turn LED on by setting the invert bit
5152          * for each LED that's mode is "link_up" in ledctl_mode2.
5153          */
5154         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5155                 for (i = 0; i < 3; i++) {
5156                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5157                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5158                             E1000_LEDCTL_MODE_LINK_UP)
5159                                 continue;
5160                         if (led & E1000_PHY_LED0_IVRT)
5161                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5162                         else
5163                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5164                 }
5165         }
5166
5167         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5168 }
5169
5170 /**
5171  *  e1000_led_off_pchlan - Turn LEDs off
5172  *  @hw: pointer to the HW structure
5173  *
5174  *  Turn off the LEDs.
5175  **/
5176 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5177 {
5178         u16 data = (u16)hw->mac.ledctl_mode1;
5179         u32 i, led;
5180
5181         DEBUGFUNC("e1000_led_off_pchlan");
5182
5183         /* If no link, then turn LED off by clearing the invert bit
5184          * for each LED that's mode is "link_up" in ledctl_mode1.
5185          */
5186         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5187                 for (i = 0; i < 3; i++) {
5188                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5189                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5190                             E1000_LEDCTL_MODE_LINK_UP)
5191                                 continue;
5192                         if (led & E1000_PHY_LED0_IVRT)
5193                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5194                         else
5195                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5196                 }
5197         }
5198
5199         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5200 }
5201
5202 /**
5203  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5204  *  @hw: pointer to the HW structure
5205  *
5206  *  Read appropriate register for the config done bit for completion status
5207  *  and configure the PHY through s/w for EEPROM-less parts.
5208  *
5209  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5210  *  config done bit, so only an error is logged and continues.  If we were
5211  *  to return with error, EEPROM-less silicon would not be able to be reset
5212  *  or change link.
5213  **/
STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u32 bank = 0;
	u32 status;

	DEBUGFUNC("e1000_get_cfg_done_ich8lan");

	/* Generic wait for the PHY config-done indication first */
	e1000_get_cfg_done_generic(hw);

	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000_get_auto_rd_done_generic(hw);
		if (ret_val) {
			/* When auto config read does not complete, do not
			 * return with an error. This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			DEBUGOUT("Auto Read Done did not complete\n");
			ret_val = E1000_SUCCESS;
		}
	}

	/* Clear PHY Reset Asserted bit (write-back of the read value
	 * with PHYRA cleared)
	 */
	status = E1000_READ_REG(hw, E1000_STATUS);
	if (status & E1000_STATUS_PHYRA)
		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
	else
		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000_phy_init_script_igp3(hw);
		}
	} else {
		/* Newer parts: no valid NVM bank means no EEPROM;
		 * report it as a configuration error.
		 */
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			DEBUGOUT("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}
5262
5263 /**
5264  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5265  * @hw: pointer to the HW structure
5266  *
5267  * In the case of a PHY power down to save power, or to turn off link during a
5268  * driver unload, or wake on lan is not enabled, remove the link.
5269  **/
5270 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5271 {
5272         /* If the management interface is not enabled, then power down */
5273         if (!(hw->mac.ops.check_mng_mode(hw) ||
5274               hw->phy.ops.check_reset_block(hw)))
5275                 e1000_power_down_phy_copper(hw);
5276
5277         return;
5278 }
5279
5280 /**
5281  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5282  *  @hw: pointer to the HW structure
5283  *
5284  *  Clears hardware counters specific to the silicon family and calls
5285  *  clear_hw_cntrs_generic to clear all general purpose counters.
5286  **/
5287 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5288 {
5289         u16 phy_data;
5290         s32 ret_val;
5291
5292         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5293
5294         e1000_clear_hw_cntrs_base_generic(hw);
5295
5296         E1000_READ_REG(hw, E1000_ALGNERRC);
5297         E1000_READ_REG(hw, E1000_RXERRC);
5298         E1000_READ_REG(hw, E1000_TNCRS);
5299         E1000_READ_REG(hw, E1000_CEXTERR);
5300         E1000_READ_REG(hw, E1000_TSCTC);
5301         E1000_READ_REG(hw, E1000_TSCTFC);
5302
5303         E1000_READ_REG(hw, E1000_MGTPRC);
5304         E1000_READ_REG(hw, E1000_MGTPDC);
5305         E1000_READ_REG(hw, E1000_MGTPTC);
5306
5307         E1000_READ_REG(hw, E1000_IAC);
5308         E1000_READ_REG(hw, E1000_ICRXOC);
5309
5310         /* Clear PHY statistics registers */
5311         if ((hw->phy.type == e1000_phy_82578) ||
5312             (hw->phy.type == e1000_phy_82579) ||
5313             (hw->phy.type == e1000_phy_i217) ||
5314             (hw->phy.type == e1000_phy_82577)) {
5315                 ret_val = hw->phy.ops.acquire(hw);
5316                 if (ret_val)
5317                         return;
5318                 ret_val = hw->phy.ops.set_page(hw,
5319                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
5320                 if (ret_val)
5321                         goto release;
5322                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5323                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5324                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5325                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5326                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5327                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5328                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5329                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5330                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5331                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5332                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5333                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5334                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5335                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5336 release:
5337                 hw->phy.ops.release(hw);
5338         }
5339 }
5340
5341 /**
5342  *  e1000_configure_k0s_lpt - Configure K0s power state
5343  *  @hw: pointer to the HW structure
5344  *  @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
5345  *      0 corresponds to 128ns, each value over 0 doubles the duration.
5346  *  @min_time: Minimum Tx idle period allowed  - valid values are 0 to 4.
5347  *      0 corresponds to 128ns, each value over 0 doubles the duration.
5348  *
5349  *  Configure the K1 power state based on the provided parameter.
5350  *  Assumes semaphore already acquired.
5351  *
5352  *  Success returns 0, Failure returns:
5353  *      -E1000_ERR_PHY (-2) in case of access error
5354  *      -E1000_ERR_PARAM (-4) in case of parameters error
5355  **/
5356 s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
5357 {
5358         s32 ret_val;
5359         u16 kmrn_reg = 0;
5360
5361         DEBUGFUNC("e1000_configure_k0s_lpt");
5362
5363         if (entry_latency > 3 || min_time > 4)
5364                 return -E1000_ERR_PARAM;
5365
5366         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
5367                                              &kmrn_reg);
5368         if (ret_val)
5369                 return ret_val;
5370
5371         /* for now don't touch the latency */
5372         kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
5373         kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));
5374
5375         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
5376                                               kmrn_reg);
5377         if (ret_val)
5378                 return ret_val;
5379
5380         return E1000_SUCCESS;
5381 }