[dpdk.git] lib/librte_pmd_e1000/e1000/e1000_ich8lan.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2012, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65 #ifdef NAHUM6_LPTH_I218_HW
66  * Ethernet Connection (2) I218-LM
67  * Ethernet Connection (2) I218-V
68 #endif
69 #ifdef NAHUM6_WPT_HW
70  * Ethernet Connection (3) I218-LM
71  * Ethernet Connection (3) I218-V
72 #endif
73  */
74
75 #include "e1000_api.h"
76
77 #if defined(NAHUM6LP_HW) && defined(ULP_IN_D0_SUPPORT)
78 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
79 #endif /* NAHUM6LP_HW && ULP_IN_D0_SUPPORT */
80 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
81 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
82 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
83 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
84 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
85 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
86 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
87 STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
88 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
89 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
90 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
91                                               u8 *mc_addr_list,
92                                               u32 mc_addr_count);
93 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
94 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
95 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
96 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
97 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
98                                             bool active);
99 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
100                                             bool active);
101 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
102                                    u16 words, u16 *data);
103 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
104                                     u16 words, u16 *data);
105 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
106 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
107 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
108                                             u16 *data);
109 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
110 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
111 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
112 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
113 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
114 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
115 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
116 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
117                                            u16 *speed, u16 *duplex);
118 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
119 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
120 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
121 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
122 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
123 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
124 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
125 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
126 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
127 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
128 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
129 static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
130 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
131                                           u32 offset, u8 *data);
132 static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
133                                           u8 size, u16 *data);
134 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
135                                           u32 offset, u16 *data);
136 static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
137                                                  u32 offset, u8 byte);
138 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
139 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
140 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
141 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
142 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
143 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
144
145 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
146 /* Offset 04h HSFSTS */
147 union ich8_hws_flash_status {
148         struct ich8_hsfsts {
149                 u16 flcdone:1; /* bit 0 Flash Cycle Done */
150                 u16 flcerr:1; /* bit 1 Flash Cycle Error */
151                 u16 dael:1; /* bit 2 Direct Access error Log */
152                 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
153                 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
154                 u16 reserved1:2; /* bit 7:6 Reserved */
155                 u16 reserved2:6; /* bit 13:8 Reserved */
156                 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
157                 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
158         } hsf_status;
159         u16 regval;
160 };
161
162 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
163 /* Offset 06h FLCTL */
164 union ich8_hws_flash_ctrl {
165         struct ich8_hsflctl {
166                 u16 flcgo:1;   /* 0 Flash Cycle Go */
167                 u16 flcycle:2;   /* 2:1 Flash Cycle */
168                 u16 reserved:5;   /* 7:3 Reserved  */
169                 u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
170                 u16 flockdn:6;   /* 15:10 Reserved */
171         } hsf_ctrl;
172         u16 regval;
173 };
174
175 /* ICH Flash Region Access Permissions */
176 union ich8_hws_flash_regacc {
177         struct ich8_flracc {
178                 u32 grra:8; /* 0:7 GbE region Read Access */
179                 u32 grwa:8; /* 8:15 GbE region Write Access */
180                 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
181                 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
182         } hsf_flregacc;
183         u16 regval;
184 };
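
/* Usage sketch (illustrative only; assumes the E1000_READ_FLASH_REG16() and
 * ICH_FLASH_HSFSTS definitions used by the flash helpers later in this file):
 * read the 16-bit register once into .regval, then decode it through the
 * bitfields, e.g.
 *
 *     union ich8_hws_flash_status hsfsts;
 *
 *     hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *     while (hsfsts.hsf_status.flcinprog)
 *             hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 */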
185
186 /**
187  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
188  *  @hw: pointer to the HW structure
189  *
190  *  Test access to the PHY registers by reading the PHY ID registers.  If
191  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
192  *  otherwise assume the read PHY ID is correct if it is valid.
193  *
194  *  Assumes the sw/fw/hw semaphore is already acquired.
195  **/
196 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
197 {
198         u16 phy_reg = 0;
199         u32 phy_id = 0;
200         s32 ret_val = 0;
201         u16 retry_count;
202         u32 mac_reg = 0;
203
204         for (retry_count = 0; retry_count < 2; retry_count++) {
205                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
206                 if (ret_val || (phy_reg == 0xFFFF))
207                         continue;
208                 phy_id = (u32)(phy_reg << 16);
209
210                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
211                 if (ret_val || (phy_reg == 0xFFFF)) {
212                         phy_id = 0;
213                         continue;
214                 }
215                 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
216                 break;
217         }
218
219         if (hw->phy.id) {
220                 if (hw->phy.id == phy_id)
221                         goto out;
222         } else if (phy_id) {
223                 hw->phy.id = phy_id;
224                 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
225                 goto out;
226         }
227
228         /* In case the PHY needs to be in mdio slow mode,
229          * set slow mode and try to get the PHY id again.
230          */
231         if (hw->mac.type < e1000_pch_lpt) {
232                 hw->phy.ops.release(hw);
233                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
234                 if (!ret_val)
235                         ret_val = e1000_get_phy_id(hw);
236                 hw->phy.ops.acquire(hw);
237         }
238
239         if (ret_val)
240                 return false;
241 out:
242         if (hw->mac.type == e1000_pch_lpt) {
243                 /* Unforce SMBus mode in PHY */
244                 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
245                 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
246                 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
247
248                 /* Unforce SMBus mode in MAC */
249                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
250                 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
251                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
252         }
253
254         return true;
255 }
256
257 /**
258  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
259  *  @hw: pointer to the HW structure
260  *
261  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
262  *  used to reset the PHY to a quiescent state when necessary.
263  **/
264 void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
265 {
266         u32 mac_reg;
267
268         DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
269
270         /* Set Phy Config Counter to 50msec */
271         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
272         mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
273         mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
274         E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
275
276         /* Toggle LANPHYPC Value bit */
277         mac_reg = E1000_READ_REG(hw, E1000_CTRL);
278         mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
279         mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
280         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
281         E1000_WRITE_FLUSH(hw);
282         usec_delay(10);
283         mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
284         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
285         E1000_WRITE_FLUSH(hw);
286
287         if (hw->mac.type < e1000_pch_lpt) {
288                 msec_delay(50);
289         } else {
290                 u16 count = 20;
291
292                 do {
293                         msec_delay(5);
294                 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
295                            E1000_CTRL_EXT_LPCD) && count--);
296
297                 msec_delay(30);
298         }
299 }
300
301 /**
302  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
303  *  @hw: pointer to the HW structure
304  *
305  *  Workarounds/flow necessary for PHY initialization during driver load
306  *  and resume paths.
307  **/
308 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
309 {
310         u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
311         s32 ret_val;
312
313         DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
314
315         /* Gate automatic PHY configuration by hardware on managed and
316          * non-managed 82579 and newer adapters.
317          */
318         e1000_gate_hw_phy_config_ich8lan(hw, true);
319
320 #if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
321         /* It is not possible to be certain of the current state of ULP
322          * so forcibly disable it.
323          */
324         hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
325
326 #endif /* NAHUM6LP_HW && ULP_SUPPORT */
327         ret_val = hw->phy.ops.acquire(hw);
328         if (ret_val) {
329                 DEBUGOUT("Failed to initialize PHY flow\n");
330                 goto out;
331         }
332
333         /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
334          * inaccessible and resetting the PHY is not blocked, toggle the
335          * LANPHYPC Value bit to force the interconnect to PCIe mode.
336          */
337         switch (hw->mac.type) {
338         case e1000_pch_lpt:
339                 if (e1000_phy_is_accessible_pchlan(hw))
340                         break;
341
342                 /* Before toggling LANPHYPC, see if PHY is accessible by
343                  * forcing MAC to SMBus mode first.
344                  */
345                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
346                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
347                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
348
349                 /* Wait 50 milliseconds for MAC to finish any retries
350                  * that it might be trying to perform from previous
351                  * attempts to acknowledge any phy read requests.
352                  */
353                 msec_delay(50);
354
355                 /* fall-through */
356         case e1000_pch2lan:
357                 if (e1000_phy_is_accessible_pchlan(hw))
358                         break;
359
360                 /* fall-through */
361         case e1000_pchlan:
362                 if ((hw->mac.type == e1000_pchlan) &&
363                     (fwsm & E1000_ICH_FWSM_FW_VALID))
364                         break;
365
366                 if (hw->phy.ops.check_reset_block(hw)) {
367                         DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
368                         ret_val = -E1000_ERR_PHY;
369                         break;
370                 }
371
372                 /* Toggle LANPHYPC Value bit */
373                 e1000_toggle_lanphypc_pch_lpt(hw);
374                 if (hw->mac.type >= e1000_pch_lpt) {
375                         if (e1000_phy_is_accessible_pchlan(hw))
376                                 break;
377
378                         /* Toggling LANPHYPC brings the PHY out of SMBus mode
379                          * so ensure that the MAC is also out of SMBus mode
380                          */
381                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
382                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
383                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
384
385                         if (e1000_phy_is_accessible_pchlan(hw))
386                                 break;
387
388                         ret_val = -E1000_ERR_PHY;
389                 }
390                 break;
391         default:
392                 break;
393         }
394
395         hw->phy.ops.release(hw);
396         if (!ret_val) {
397
398                 /* Check to see if able to reset PHY.  Print error if not */
399                 if (hw->phy.ops.check_reset_block(hw)) {
400                         ERROR_REPORT("Reset blocked by ME\n");
401                         goto out;
402                 }
403
404                 /* Reset the PHY before any access to it.  Doing so, ensures
405                  * that the PHY is in a known good state before we read/write
406                  * PHY registers.  The generic reset is sufficient here,
407                  * because we haven't determined the PHY type yet.
408                  */
409                 ret_val = e1000_phy_hw_reset_generic(hw);
410                 if (ret_val)
411                         goto out;
412
413                 /* On a successful reset, possibly need to wait for the PHY
414                  * to quiesce to an accessible state before returning control
415                  * to the calling function.  If the PHY does not quiesce, then
416                  * return E1000E_BLK_PHY_RESET, as this is the condition that
417                  * the PHY is in.
418                  */
419                 ret_val = hw->phy.ops.check_reset_block(hw);
420                 if (ret_val)
421                         ERROR_REPORT("ME blocked access to PHY after reset\n");
422         }
423
424 out:
425         /* Ungate automatic PHY configuration on non-managed 82579 */
426         if ((hw->mac.type == e1000_pch2lan) &&
427             !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
428                 msec_delay(10);
429                 e1000_gate_hw_phy_config_ich8lan(hw, false);
430         }
431
432         return ret_val;
433 }
434
435 /**
436  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
437  *  @hw: pointer to the HW structure
438  *
439  *  Initialize family-specific PHY parameters and function pointers.
440  **/
441 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
442 {
443         struct e1000_phy_info *phy = &hw->phy;
444         s32 ret_val;
445
446         DEBUGFUNC("e1000_init_phy_params_pchlan");
447
448         phy->addr               = 1;
449         phy->reset_delay_us     = 100;
450
451         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
452         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
453         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
454         phy->ops.set_page       = e1000_set_page_igp;
455         phy->ops.read_reg       = e1000_read_phy_reg_hv;
456         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
457         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
458         phy->ops.release        = e1000_release_swflag_ich8lan;
459         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
460         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
461         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
462         phy->ops.write_reg      = e1000_write_phy_reg_hv;
463         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
464         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
465         phy->ops.power_up       = e1000_power_up_phy_copper;
466         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
467         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
468
469         phy->id = e1000_phy_unknown;
470
471         ret_val = e1000_init_phy_workarounds_pchlan(hw);
472         if (ret_val)
473                 return ret_val;
474
475         if (phy->id == e1000_phy_unknown)
476                 switch (hw->mac.type) {
477                 default:
478                         ret_val = e1000_get_phy_id(hw);
479                         if (ret_val)
480                                 return ret_val;
481                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
482                                 break;
483                         /* fall-through */
484                 case e1000_pch2lan:
485                 case e1000_pch_lpt:
486                         /* In case the PHY needs to be in mdio slow mode,
487                          * set slow mode and try to get the PHY id again.
488                          */
489                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
490                         if (ret_val)
491                                 return ret_val;
492                         ret_val = e1000_get_phy_id(hw);
493                         if (ret_val)
494                                 return ret_val;
495                         break;
496                 }
497         phy->type = e1000_get_phy_type_from_id(phy->id);
498
499         switch (phy->type) {
500         case e1000_phy_82577:
501         case e1000_phy_82579:
502         case e1000_phy_i217:
503                 phy->ops.check_polarity = e1000_check_polarity_82577;
504                 phy->ops.force_speed_duplex =
505                         e1000_phy_force_speed_duplex_82577;
506                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
507                 phy->ops.get_info = e1000_get_phy_info_82577;
508                 phy->ops.commit = e1000_phy_sw_reset_generic;
509                 break;
510         case e1000_phy_82578:
511                 phy->ops.check_polarity = e1000_check_polarity_m88;
512                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
513                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
514                 phy->ops.get_info = e1000_get_phy_info_m88;
515                 break;
516         default:
517                 ret_val = -E1000_ERR_PHY;
518                 break;
519         }
520
521         return ret_val;
522 }
523
524 /**
525  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
526  *  @hw: pointer to the HW structure
527  *
528  *  Initialize family-specific PHY parameters and function pointers.
529  **/
530 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
531 {
532         struct e1000_phy_info *phy = &hw->phy;
533         s32 ret_val;
534         u16 i = 0;
535
536         DEBUGFUNC("e1000_init_phy_params_ich8lan");
537
538         phy->addr               = 1;
539         phy->reset_delay_us     = 100;
540
541         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
542         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
543         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
544         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
545         phy->ops.read_reg       = e1000_read_phy_reg_igp;
546         phy->ops.release        = e1000_release_swflag_ich8lan;
547         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
548         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
549         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
550         phy->ops.write_reg      = e1000_write_phy_reg_igp;
551         phy->ops.power_up       = e1000_power_up_phy_copper;
552         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
553
554         /* We may need to do this twice - once for IGP and if that fails,
555          * we'll set BM func pointers and try again
556          */
557         ret_val = e1000_determine_phy_address(hw);
558         if (ret_val) {
559                 phy->ops.write_reg = e1000_write_phy_reg_bm;
560                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
561                 ret_val = e1000_determine_phy_address(hw);
562                 if (ret_val) {
563                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
564                         return ret_val;
565                 }
566         }
567
568         phy->id = 0;
569         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
570                (i++ < 100)) {
571                 msec_delay(1);
572                 ret_val = e1000_get_phy_id(hw);
573                 if (ret_val)
574                         return ret_val;
575         }
576
577         /* Verify phy id */
578         switch (phy->id) {
579         case IGP03E1000_E_PHY_ID:
580                 phy->type = e1000_phy_igp_3;
581                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
582                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
583                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
584                 phy->ops.get_info = e1000_get_phy_info_igp;
585                 phy->ops.check_polarity = e1000_check_polarity_igp;
586                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
587                 break;
588         case IFE_E_PHY_ID:
589         case IFE_PLUS_E_PHY_ID:
590         case IFE_C_E_PHY_ID:
591                 phy->type = e1000_phy_ife;
592                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
593                 phy->ops.get_info = e1000_get_phy_info_ife;
594                 phy->ops.check_polarity = e1000_check_polarity_ife;
595                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
596                 break;
597         case BME1000_E_PHY_ID:
598                 phy->type = e1000_phy_bm;
599                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
600                 phy->ops.read_reg = e1000_read_phy_reg_bm;
601                 phy->ops.write_reg = e1000_write_phy_reg_bm;
602                 phy->ops.commit = e1000_phy_sw_reset_generic;
603                 phy->ops.get_info = e1000_get_phy_info_m88;
604                 phy->ops.check_polarity = e1000_check_polarity_m88;
605                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
606                 break;
607         default:
608                 return -E1000_ERR_PHY;
610         }
611
612         return E1000_SUCCESS;
613 }
614
615 /**
616  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
617  *  @hw: pointer to the HW structure
618  *
619  *  Initialize family-specific NVM parameters and function
620  *  pointers.
621  **/
622 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
623 {
624         struct e1000_nvm_info *nvm = &hw->nvm;
625         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
626         u32 gfpreg, sector_base_addr, sector_end_addr;
627         u16 i;
628
629         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
630
631         /* Can't read flash registers if the register set isn't mapped. */
632         if (!hw->flash_address) {
633                 DEBUGOUT("ERROR: Flash registers not mapped\n");
634                 return -E1000_ERR_CONFIG;
635         }
636
637         nvm->type = e1000_nvm_flash_sw;
638
639         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
640
641         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
642          * Add 1 to sector_end_addr since this sector is included in
643          * the overall size.
644          */
645         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
646         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
647
648         /* flash_base_addr is byte-aligned */
649         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
650
651         /* find total size of the NVM, then cut in half since the total
652          * size represents two separate NVM banks.
653          */
654         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
655                                 << FLASH_SECTOR_ADDR_SHIFT);
656         nvm->flash_bank_size /= 2;
657         /* Adjust to word count */
658         nvm->flash_bank_size /= sizeof(u16);
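
        /* Worked example (hypothetical GFPREG contents): if sector_base_addr
         * is 1 and sector_end_addr is 5, the region spans four 4096-byte
         * sectors (16384 bytes); half of that is one 8192-byte bank, which
         * the division by sizeof(u16) turns into 4096 words.
         */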
659
660         nvm->word_size = E1000_SHADOW_RAM_WORDS;
661
662         /* Clear shadow ram */
663         for (i = 0; i < nvm->word_size; i++) {
664                 dev_spec->shadow_ram[i].modified = false;
665                 dev_spec->shadow_ram[i].value    = 0xFFFF;
666         }
667
668         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
669         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
670
671         /* Function Pointers */
672         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
673         nvm->ops.release        = e1000_release_nvm_ich8lan;
674         nvm->ops.read           = e1000_read_nvm_ich8lan;
675         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
676         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
677         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
678         nvm->ops.write          = e1000_write_nvm_ich8lan;
679
680         return E1000_SUCCESS;
681 }
682
683 /**
684  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
685  *  @hw: pointer to the HW structure
686  *
687  *  Initialize family-specific MAC parameters and function
688  *  pointers.
689  **/
690 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
691 {
692         struct e1000_mac_info *mac = &hw->mac;
693 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
694         u16 pci_cfg;
695 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
696
697         DEBUGFUNC("e1000_init_mac_params_ich8lan");
698
699         /* Set media type function pointer */
700         hw->phy.media_type = e1000_media_type_copper;
701
702         /* Set mta register count */
703         mac->mta_reg_count = 32;
704         /* Set rar entry count */
705         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
706         if (mac->type == e1000_ich8lan)
707                 mac->rar_entry_count--;
708         /* Set if part includes ASF firmware */
709         mac->asf_firmware_present = true;
710         /* FWSM register */
711         mac->has_fwsm = true;
712         /* ARC subsystem not supported */
713         mac->arc_subsystem_valid = false;
714         /* Adaptive IFS supported */
715         mac->adaptive_ifs = true;
716
717         /* Function pointers */
718
719         /* bus type/speed/width */
720         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
721         /* function id */
722         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
723         /* reset */
724         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
725         /* hw initialization */
726         mac->ops.init_hw = e1000_init_hw_ich8lan;
727         /* link setup */
728         mac->ops.setup_link = e1000_setup_link_ich8lan;
729         /* physical interface setup */
730         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
731         /* check for link */
732         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
733         /* link info */
734         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
735         /* multicast address update */
736         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
737         /* clear hardware counters */
738         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
739
740         /* LED and other operations */
741         switch (mac->type) {
742         case e1000_ich8lan:
743         case e1000_ich9lan:
744         case e1000_ich10lan:
745                 /* check management mode */
746                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
747                 /* ID LED init */
748                 mac->ops.id_led_init = e1000_id_led_init_generic;
749                 /* blink LED */
750                 mac->ops.blink_led = e1000_blink_led_generic;
751                 /* setup LED */
752                 mac->ops.setup_led = e1000_setup_led_generic;
753                 /* cleanup LED */
754                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
755                 /* turn on/off LED */
756                 mac->ops.led_on = e1000_led_on_ich8lan;
757                 mac->ops.led_off = e1000_led_off_ich8lan;
758                 break;
759         case e1000_pch2lan:
760                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
761                 mac->ops.rar_set = e1000_rar_set_pch2lan;
762                 /* fall-through */
763         case e1000_pch_lpt:
764 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
765                 /* multicast address update for pch2 */
766                 mac->ops.update_mc_addr_list =
767                         e1000_update_mc_addr_list_pch2lan;
768 #endif
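                /* fall-through */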
769         case e1000_pchlan:
770 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
771                 /* save PCH revision_id */
772                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
773                 hw->revision_id = (u8)(pci_cfg & 0x000F);
774 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
775                 /* check management mode */
776                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
777                 /* ID LED init */
778                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
779                 /* setup LED */
780                 mac->ops.setup_led = e1000_setup_led_pchlan;
781                 /* cleanup LED */
782                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
783                 /* turn on/off LED */
784                 mac->ops.led_on = e1000_led_on_pchlan;
785                 mac->ops.led_off = e1000_led_off_pchlan;
786                 break;
787         default:
788                 break;
789         }
790
791         if (mac->type == e1000_pch_lpt) {
792                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
793                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
794                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
795         }
796
797         /* Enable PCS Lock-loss workaround for ICH8 */
798         if (mac->type == e1000_ich8lan)
799                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
800
801         return E1000_SUCCESS;
802 }
803
804 /**
805  *  __e1000_access_emi_reg_locked - Read/write EMI register
806  *  @hw: pointer to the HW structure
807  *  @address: EMI address to program
808  *  @data: pointer to value to read/write from/to the EMI address
809  *  @read: boolean flag to indicate read or write
810  *
811  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
812  **/
813 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
814                                          u16 *data, bool read)
815 {
816         s32 ret_val;
817
818         DEBUGFUNC("__e1000_access_emi_reg_locked");
819
820         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
821         if (ret_val)
822                 return ret_val;
823
824         if (read)
825                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
826                                                       data);
827         else
828                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
829                                                        *data);
830
831         return ret_val;
832 }
833
834 /**
835  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
836  *  @hw: pointer to the HW structure
837  *  @addr: EMI address to program
838  *  @data: value to be read from the EMI address
839  *
840  *  Assumes the SW/FW/HW Semaphore is already acquired.
841  **/
842 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
843 {
844         DEBUGFUNC("e1000_read_emi_reg_locked");
845
846         return __e1000_access_emi_reg_locked(hw, addr, data, true);
847 }
848
849 /**
850  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
851  *  @hw: pointer to the HW structure
852  *  @addr: EMI address to program
853  *  @data: value to be written to the EMI address
854  *
855  *  Assumes the SW/FW/HW Semaphore is already acquired.
856  **/
857 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
858 {
859         DEBUGFUNC("e1000_write_emi_reg_locked");
860
861         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
862 }
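
/* Usage sketch (illustrative only): both EMI helpers assume the caller already
 * holds the SW/FW/HW semaphore, so a typical access follows the pattern used
 * by e1000_set_eee_pchlan() below:
 *
 *     ret_val = hw->phy.ops.acquire(hw);
 *     if (ret_val)
 *             return ret_val;
 *     ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_PCS_STATUS, &data);
 *     hw->phy.ops.release(hw);
 */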
863
864 /**
865  *  e1000_set_eee_pchlan - Enable/disable EEE support
866  *  @hw: pointer to the HW structure
867  *
868  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
869  *  the link and the EEE capabilities of the link partner.  The LPI Control
870  *  register bits will remain set only if/when link is up.
871  *
872  *  EEE LPI must not be asserted earlier than one second after link is up.
873  *  On 82579, EEE LPI should not be enabled until such time otherwise there
874  *  can be link issues with some switches.  Other devices can have EEE LPI
875  *  enabled immediately upon link up since they have a timer in hardware which
876  *  prevents LPI from being asserted too early.
877  **/
878 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
879 {
880         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
881         s32 ret_val;
882         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
883
884         DEBUGFUNC("e1000_set_eee_pchlan");
885
886         switch (hw->phy.type) {
887         case e1000_phy_82579:
888                 lpa = I82579_EEE_LP_ABILITY;
889                 pcs_status = I82579_EEE_PCS_STATUS;
890                 adv_addr = I82579_EEE_ADVERTISEMENT;
891                 break;
892         case e1000_phy_i217:
893                 lpa = I217_EEE_LP_ABILITY;
894                 pcs_status = I217_EEE_PCS_STATUS;
895                 adv_addr = I217_EEE_ADVERTISEMENT;
896                 break;
897         default:
898                 return E1000_SUCCESS;
899         }
900
901         ret_val = hw->phy.ops.acquire(hw);
902         if (ret_val)
903                 return ret_val;
904
905         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
906         if (ret_val)
907                 goto release;
908
909         /* Clear bits that enable EEE in various speeds */
910         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
911
912         /* Enable EEE if not disabled by user */
913         if (!dev_spec->eee_disable) {
914                 /* Save off link partner's EEE ability */
915                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
916                                                     &dev_spec->eee_lp_ability);
917                 if (ret_val)
918                         goto release;
919
920                 /* Read EEE advertisement */
921                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
922                 if (ret_val)
923                         goto release;
924
925                 /* Enable EEE only for speeds in which the link partner is
926                  * EEE capable and for which we advertise EEE.
927                  */
928                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
929                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
930
931                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
932                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
933                         if (data & NWAY_LPAR_100TX_FD_CAPS)
934                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
935                         else
936                                 /* EEE is not supported in 100Half, so ignore
937                                  * partner's EEE in 100 ability if full-duplex
938                                  * is not advertised.
939                                  */
940                                 dev_spec->eee_lp_ability &=
941                                     ~I82579_EEE_100_SUPPORTED;
942                 }
943         }
944
945         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
946         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
947         if (ret_val)
948                 goto release;
949
950         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
951 release:
952         hw->phy.ops.release(hw);
953
954         return ret_val;
955 }
956
957 /**
958  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
959  *  @hw:   pointer to the HW structure
960  *  @link: link up bool flag
961  *
962  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
963  *  preventing further DMA write requests.  Workaround the issue by disabling
964  *  the de-assertion of the clock request when in 1Gbps mode.
965  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
966  *  speeds in order to avoid Tx hangs.
967  **/
968 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
969 {
970         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
971         u32 status = E1000_READ_REG(hw, E1000_STATUS);
972         s32 ret_val = E1000_SUCCESS;
973         u16 reg;
974
975         if (link && (status & E1000_STATUS_SPEED_1000)) {
976                 ret_val = hw->phy.ops.acquire(hw);
977                 if (ret_val)
978                         return ret_val;
979
980                 ret_val =
981                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
982                                                &reg);
983                 if (ret_val)
984                         goto release;
985
986                 ret_val =
987                     e1000_write_kmrn_reg_locked(hw,
988                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
989                                                 reg &
990                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
991                 if (ret_val)
992                         goto release;
993
994                 usec_delay(10);
995
996                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
997                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
998
999                 ret_val =
1000                     e1000_write_kmrn_reg_locked(hw,
1001                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1002                                                 reg);
1003 release:
1004                 hw->phy.ops.release(hw);
1005         } else {
1006                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1007                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1008
1009                 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1010                               (status & E1000_STATUS_FD)))
1011                         goto update_fextnvm6;
1012
1013                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1014                 if (ret_val)
1015                         return ret_val;
1016
1017                 /* Clear link status transmit timeout */
1018                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1019
1020                 if (status & E1000_STATUS_SPEED_100) {
1021                         /* Set inband Tx timeout to 5x10us for 100Half */
1022                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1023
1024                         /* Do not extend the K1 entry latency for 100Half */
1025                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1026                 } else {
1027                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1028                         reg |= 50 <<
1029                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1030
1031                         /* Extend the K1 entry latency for 10 Mbps */
1032                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1033                 }
1034
1035                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1036                 if (ret_val)
1037                         return ret_val;
1038
1039 update_fextnvm6:
1040                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1041         }
1042
1043         return ret_val;
1044 }
1045
1046 #ifdef C10_SUPPORT
1047 /**
1048  * e1000_demote_ltr - Demote/Promote the LTR value
1049  * @hw: pointer to the HW structure
1050  * @demote: boolean value to control whether we are demoting or promoting
1051  *    the LTR value (promoting allows deeper C-States).
1052  * @link: boolean value stating whether we currently have link
1053  *
1054  * Configure the LTRV register with the proper LTR value
1055  **/
1056 void e1000_demote_ltr(struct e1000_hw *hw, bool demote, bool link)
1057 {
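        /* The two link-dependent bits mark the snoop and no-snoop LTR values
         * as "required" when link is up, and E1000_LTRV_SEND pushes the
         * programmed value to the platform (assuming the usual LTRV layout
         * with the no-snoop value in the upper half of the register).
         */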
1058         u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1059                   link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1060
1061         if ((hw->device_id != E1000_DEV_ID_PCH_I218_LM3) &&
1062             (hw->device_id != E1000_DEV_ID_PCH_I218_V3))
1063                 return;
1064
1065         if (demote) {
1066                 reg |= hw->dev_spec.ich8lan.lat_enc |
1067                        (hw->dev_spec.ich8lan.lat_enc <<
1068                         E1000_LTRV_NOSNOOP_SHIFT);
1069         } else {
1070                 reg |= hw->dev_spec.ich8lan.max_ltr_enc |
1071                        (hw->dev_spec.ich8lan.max_ltr_enc <<
1072                         E1000_LTRV_NOSNOOP_SHIFT);
1073         }
1074
1075         E1000_WRITE_REG(hw, E1000_LTRV, reg);
1076         return;
1077 }
1078
1079 #endif /* C10_SUPPORT */
1080 #if defined(NAHUM6LP_HW) && defined(ULP_SUPPORT)
1081 /**
1082  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1083  *  @hw: pointer to the HW structure
1084  *  @to_sx: boolean indicating a system power state transition to Sx
1085  *
1086  *  When link is down, configure ULP mode to significantly reduce the power
1087  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1088  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1089  *  system, configure the ULP mode by software.
1090  */
1091 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1092 {
1093         u32 mac_reg;
1094         s32 ret_val = E1000_SUCCESS;
1095         u16 phy_reg;
1096
1097         if ((hw->mac.type < e1000_pch_lpt) ||
1098             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1099             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1100 #ifdef NAHUM6_LPTH_I218_HW
1101             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1102             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1103 #endif
1104             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1105                 return 0;
1106
1107 #ifdef ULP_IN_D0_SUPPORT
1108         if (!to_sx) {
1109                 int i = 0;
1110
1111                 /* Poll up to 5 seconds for Cable Disconnected indication */
1112                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1113                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1114                         /* Bail if link is re-acquired */
1115                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1116                                 return -E1000_ERR_PHY;
1117
1118                         if (i++ == 100)
1119                                 break;
1120
1121                         msec_delay(50);
1122                 }
1123                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1124                           (E1000_READ_REG(hw, E1000_FEXT) &
1125                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1126                           i * 50);
1127         }
1128
1129 #endif /* ULP_IN_D0_SUPPORT */
1130         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1131                 /* Request ME configure ULP mode in the PHY */
1132                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1133                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1134                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1135
1136                 goto out;
1137         }
1138
1139 #ifndef ULP_IN_D0_SUPPORT
1140         if (!to_sx) {
1141                 int i = 0;
1142
1143                 /* Poll up to 5 seconds for Cable Disconnected indication */
1144                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1145                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1146                         /* Bail if link is re-acquired */
1147                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1148                                 return -E1000_ERR_PHY;
1149
1150                         if (i++ == 100)
1151                                 break;
1152
1153                         msec_delay(50);
1154                 }
1155                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1156                           (E1000_READ_REG(hw, E1000_FEXT) &
1157                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1158                           i * 50);
1159         }
1160
1161 #endif /* !ULP_IN_D0_SUPPORT */
1162         ret_val = hw->phy.ops.acquire(hw);
1163         if (ret_val)
1164                 goto out;
1165
1166         /* Force SMBus mode in PHY */
1167         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1168         if (ret_val)
1169                 goto release;
1170         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1171         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1172
1173         /* Force SMBus mode in MAC */
1174         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1175         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1176         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1177
1178 #ifdef ULP_IN_D0_SUPPORT
1179         if (!to_sx) {
1180                 /* Change the 'Link Status Change' interrupt to trigger
1181                  * on 'Cable Status Change'
1182                  */
1183                 ret_val = e1000_read_kmrn_reg_locked(hw,
1184                                                      E1000_KMRNCTRLSTA_OP_MODES,
1185                                                      &phy_reg);
1186                 if (ret_val)
1187                         goto release;
1188                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1189                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1190                                             phy_reg);
1191         }
1192
1193 #endif /* ULP_IN_D0_SUPPORT */
1194         /* Set Inband ULP Exit, Reset to SMBus mode and
1195          * Disable SMBus Release on PERST# in PHY
1196          */
1197         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1198         if (ret_val)
1199                 goto release;
1200         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1201                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1202         if (to_sx) {
1203                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1204                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1205
1206                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1207         } else {
1208                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1209         }
1210         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1211
1212         /* Set Disable SMBus Release on PERST# in MAC */
1213         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1214         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1215         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1216
1217         /* Commit ULP changes in PHY by starting auto ULP configuration */
1218         phy_reg |= I218_ULP_CONFIG1_START;
1219         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1220 #ifdef ULP_IN_D0_SUPPORT
1221
1222         if (!to_sx) {
1223                 /* Disable Tx so that the MAC doesn't send any (buffered)
1224                  * packets to the PHY.
1225                  */
1226                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1227                 mac_reg &= ~E1000_TCTL_EN;
1228                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1229         }
1230 #endif
1231 release:
1232         hw->phy.ops.release(hw);
1233 out:
1234         if (ret_val)
1235                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1236         else
1237                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1238
1239         return ret_val;
1240 }
1241
1242 /**
1243  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1244  *  @hw: pointer to the HW structure
1245  *  @force: boolean indicating whether or not to force disabling ULP
1246  *
1247  *  Un-configure ULP mode when link is up, the system is transitioned from
1248  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1249  *  system, poll for an indication from ME that ULP has been un-configured.
1250  *  If not on an ME enabled system, un-configure the ULP mode by software.
1251  *
1252  *  During nominal operation, this function is called when link is acquired
1253  *  to disable ULP mode (force=false); otherwise, for example when unloading
1254  *  the driver or during Sx->S0 transitions, this is called with force=true
1255  *  to forcibly disable ULP.
1256 #ifdef ULP_IN_D0_SUPPORT
1257
1258  *  When the cable is plugged in while the device is in D0, a Cable Status
1259  *  Change interrupt is generated which causes this function to be called
1260  *  to partially disable ULP mode and restart autonegotiation.  This function
1261  *  is then called again due to the resulting Link Status Change interrupt
1262  *  to finish cleaning up after the ULP flow.
1263 #endif
1264  */
1265 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1266 {
1267         s32 ret_val = E1000_SUCCESS;
1268         u32 mac_reg;
1269         u16 phy_reg;
1270         int i = 0;
1271
1272         if ((hw->mac.type < e1000_pch_lpt) ||
1273             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1274             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1275 #ifdef NAHUM6_LPTH_I218_HW
1276             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1277             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1278 #endif
1279             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1280                 return 0;
1281
1282         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1283                 if (force) {
1284                         /* Request ME un-configure ULP mode in the PHY */
1285                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1286                         mac_reg &= ~E1000_H2ME_ULP;
1287                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1288                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1289                 }
1290
1291                 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1292                 while (E1000_READ_REG(hw, E1000_FWSM) &
1293                        E1000_FWSM_ULP_CFG_DONE) {
1294                         if (i++ == 10) {
1295                                 ret_val = -E1000_ERR_PHY;
1296                                 goto out;
1297                         }
1298
1299                         msec_delay(10);
1300                 }
1301                 DEBUGOUT1("ULP_CFG_DONE cleared after %dmsec\n", i * 10);
1302
1303                 if (force) {
1304                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1305                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1306                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1307                 } else {
1308                         /* Clear H2ME.ULP after ME ULP configuration */
1309                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1310                         mac_reg &= ~E1000_H2ME_ULP;
1311                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1312 #ifdef ULP_IN_D0_SUPPORT
1313
1314                         /* Restore link speed advertisements and restart
1315                          * Auto-negotiation
1316                          */
1317                         ret_val = e1000_phy_setup_autoneg(hw);
1318                         if (ret_val)
1319                                 goto out;
1320
1321                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1322 #endif
1323                 }
1324
1325                 goto out;
1326         }
1327
1328         if (force)
1329                 /* Toggle LANPHYPC Value bit */
1330                 e1000_toggle_lanphypc_pch_lpt(hw);
1331
1332         ret_val = hw->phy.ops.acquire(hw);
1333         if (ret_val)
1334                 goto out;
1335
1336 #ifdef ULP_IN_D0_SUPPORT
1337         /* Revert the 'Link Status Change' interrupt so that it no
1338          * longer triggers on 'Cable Status Change'
1339          */
1340         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1341                                              &phy_reg);
1342         if (ret_val)
1343                 goto release;
1344         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1345         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1346
1347 #endif
1348         /* Unforce SMBus mode in PHY */
1349         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1350         if (ret_val) {
1351                 /* The MAC might be in PCIe mode, so temporarily force to
1352                  * SMBus mode in order to access the PHY.
1353                  */
1354                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1355                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1356                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1357
1358                 msec_delay(50);
1359
1360                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1361                                                        &phy_reg);
1362                 if (ret_val)
1363                         goto release;
1364         }
1365         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1366         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1367
1368         /* Unforce SMBus mode in MAC */
1369         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1370         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1371         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1372
1373         /* When ULP mode was previously entered, K1 was disabled by the
1374          * hardware.  Re-enable K1 in the PHY when exiting ULP.
1375          */
1376         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1377         if (ret_val)
1378                 goto release;
1379         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1380         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1381
1382         /* Clear ULP enabled configuration */
1383         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1384         if (ret_val)
1385                 goto release;
1386 #ifdef ULP_IN_D0_SUPPORT
1387         /* CSC interrupt received due to ULP Indication */
1388         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1389 #endif
1390                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1391                              I218_ULP_CONFIG1_STICKY_ULP |
1392                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1393                              I218_ULP_CONFIG1_WOL_HOST |
1394                              I218_ULP_CONFIG1_INBAND_EXIT |
1395                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1396                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1397
1398                 /* Commit ULP changes by starting auto ULP configuration */
1399                 phy_reg |= I218_ULP_CONFIG1_START;
1400                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1401
1402                 /* Clear Disable SMBus Release on PERST# in MAC */
1403                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1404                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1405                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1406
1407 #ifdef ULP_IN_D0_SUPPORT
1408                 if (!force) {
1409                         hw->phy.ops.release(hw);
1410
1411                         if (hw->mac.autoneg)
1412                                 e1000_phy_setup_autoneg(hw);
1413
1414                         e1000_sw_lcd_config_ich8lan(hw);
1415
1416                         e1000_oem_bits_config_ich8lan(hw, true);
1417
1418                         /* Set ULP state to unknown and return non-zero to
1419                          * indicate no link (yet) and re-enter on the next LSC
1420                          * to finish the ULP disable flow.
1421                          */
1422                         hw->dev_spec.ich8lan.ulp_state =
1423                             e1000_ulp_state_unknown;
1424
1425                         return 1;
1426                 }
1427         }
1428
1429         /* Re-enable Tx */
1430         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1431         mac_reg |= E1000_TCTL_EN;
1432         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1433
1434 #endif
1435 release:
1436         hw->phy.ops.release(hw);
1437         if (force) {
1438                 hw->phy.ops.reset(hw);
1439                 msec_delay(50);
1440         }
1441 out:
1442         if (ret_val)
1443                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1444         else
1445                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1446
1447         return ret_val;
1448 }
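/* Illustrative call pattern (editor's sketch, not part of the driver): the
 * copper link-check path below exits or enters ULP based on the MAC link
 * indication, while unload or Sx->S0 paths pass force = true, e.g.
 *
 *	if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
 *		ret_val = e1000_disable_ulp_lpt_lp(hw, false);
 *	else
 *		ret_val = e1000_enable_ulp_lpt_lp(hw, false);
 */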
1449
1450 #endif /* NAHUM6LP_HW && ULP_SUPPORT */
1451 /**
1452  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1453  *  @hw: pointer to the HW structure
1454  *
1455  *  Checks to see if the link status of the hardware has changed.  If a
1456  *  change in link status has been detected, then we read the PHY registers
1457  *  to get the current speed/duplex if link exists.
1458  **/
1459 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1460 {
1461         struct e1000_mac_info *mac = &hw->mac;
1462         s32 ret_val;
1463 #if defined(NAHUM6LP_HW) && defined(ULP_IN_D0_SUPPORT)
1464         bool link = false;
1465 #else
1466         bool link;
1467 #endif
1468         u16 phy_reg;
1469
1470         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1471
1472         /* We only want to go out to the PHY registers to see if Auto-Neg
1473          * has completed and/or if our link status has changed.  The
1474          * get_link_status flag is set upon receiving a Link Status
1475          * Change or Rx Sequence Error interrupt.
1476          */
1477         if (!mac->get_link_status)
1478                 return E1000_SUCCESS;
1479
1480 #if defined(NAHUM6LP_HW) && defined(ULP_IN_D0_SUPPORT)
1481         if ((hw->mac.type < e1000_pch_lpt) ||
1482             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1483             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1484 #endif /* NAHUM6LP_HW && ULP_IN_D0_SUPPORT */
1485                 /* First we want to see if the MII Status Register reports
1486                  * link.  If so, then we want to get the current speed/duplex
1487                  * of the PHY.
1488                  */
1489                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1490                 if (ret_val)
1491                         return ret_val;
1492 #if defined(NAHUM6LP_HW) && defined(ULP_IN_D0_SUPPORT)
1493         } else {
1494                 /* Check the MAC's STATUS register to determine link state
1495                  * since the PHY could be inaccessible while in ULP mode.
1496                  */
1497                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1498                 if (link)
1499                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1500                 else
1501                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1502
1503                 if (ret_val)
1504                         return ret_val;
1505         }
1506 #endif /* NAHUM6LP_HW && ULP_IN_D0_SUPPORT */
1507
1508         if (hw->mac.type == e1000_pchlan) {
1509                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1510                 if (ret_val)
1511                         return ret_val;
1512         }
1513
1514         /* When connected at 10Mbps half-duplex, 82579 parts are excessively
1515          * aggressive, resulting in many collisions. To avoid this, increase
1516          * the IPG and reduce Rx latency in the PHY.
1517          */
1518         if ((hw->mac.type == e1000_pch2lan) && link) {
1519                 u32 reg;
1520                 reg = E1000_READ_REG(hw, E1000_STATUS);
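                /* FD clear and both speed bits clear together indicate a
                 * 10Mbps half-duplex link, the case this workaround targets.
                 */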
1521                 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1522                         reg = E1000_READ_REG(hw, E1000_TIPG);
1523                         reg &= ~E1000_TIPG_IPGT_MASK;
1524                         reg |= 0xFF;
1525                         E1000_WRITE_REG(hw, E1000_TIPG, reg);
1526
1527                         /* Reduce Rx latency in analog PHY */
1528                         ret_val = hw->phy.ops.acquire(hw);
1529                         if (ret_val)
1530                                 return ret_val;
1531
1532                         ret_val = e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
1533
1534                         hw->phy.ops.release(hw);
1535
1536                         if (ret_val)
1537                                 return ret_val;
1538                 }
1539         }
1540
1541 #if defined(NAHUM6LP_HW) && defined(NAHUM6_WPT_HW)
1542         /* Work-around I218 hang issue */
1543         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1544             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1545             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1546             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1547                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1548                 if (ret_val)
1549                         return ret_val;
1550         }
1551 #else
1552         /* Work-around I218 hang issue */
1553         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1554             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
1555                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1556                 if (ret_val)
1557                         return ret_val;
1558         }
1559
1560 #endif /* defined(NAHUM6LP_HW) && defined(NAHUM6_WPT_HW) */
1561         /* Clear link partner's EEE ability */
1562         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1563
1564         if (!link)
1565                 return E1000_SUCCESS; /* No link detected */
1566
1567         mac->get_link_status = false;
1568
1569         switch (hw->mac.type) {
1570         case e1000_pch2lan:
1571                 ret_val = e1000_k1_workaround_lv(hw);
1572                 if (ret_val)
1573                         return ret_val;
1574                 /* fall-thru */
1575         case e1000_pchlan:
1576                 if (hw->phy.type == e1000_phy_82578) {
1577                         ret_val = e1000_link_stall_workaround_hv(hw);
1578                         if (ret_val)
1579                                 return ret_val;
1580                 }
1581
1582                 /* Workaround for PCHx parts in half-duplex:
1583                  * Set the number of preambles removed from the packet
1584                  * when it is passed from the PHY to the MAC to prevent
1585                  * the MAC from misinterpreting the packet type.
1586                  */
1587                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1588                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1589
1590                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1591                     E1000_STATUS_FD)
1592                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1593
1594                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1595                 break;
1596         default:
1597                 break;
1598         }
1599
1600         /* Check if there was DownShift; this must be checked
1601          * immediately after link-up
1602          */
1603         e1000_check_downshift_generic(hw);
1604
1605         /* Enable/Disable EEE after link up */
1606         if (hw->phy.type > e1000_phy_82579) {
1607                 ret_val = e1000_set_eee_pchlan(hw);
1608                 if (ret_val)
1609                         return ret_val;
1610         }
1611
1612         /* If we are forcing speed/duplex, then we simply return since
1613          * we have already determined whether we have link or not.
1614          */
1615         if (!mac->autoneg)
1616                 return -E1000_ERR_CONFIG;
1617
1618         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1619          * of MAC speed/duplex configuration.  So we only need to
1620          * configure Collision Distance in the MAC.
1621          */
1622         mac->ops.config_collision_dist(hw);
1623
1624         /* Configure Flow Control now that Auto-Neg has completed.
1625          * First, we need to restore the desired flow control
1626          * settings because we may have had to re-autoneg with a
1627          * different link partner.
1628          */
1629         ret_val = e1000_config_fc_after_link_up_generic(hw);
1630         if (ret_val)
1631                 DEBUGOUT("Error configuring flow control\n");
1632
1633         return ret_val;
1634 }
1635
1636 /**
1637  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1638  *  @hw: pointer to the HW structure
1639  *
1640  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1641  **/
1642 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1643 {
1644         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1645
1646         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1647         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1648         switch (hw->mac.type) {
1649         case e1000_ich8lan:
1650         case e1000_ich9lan:
1651         case e1000_ich10lan:
1652                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1653                 break;
1654         case e1000_pchlan:
1655         case e1000_pch2lan:
1656         case e1000_pch_lpt:
1657                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1658                 break;
1659         default:
1660                 break;
1661         }
1662 }
1663
1664 /**
1665  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1666  *  @hw: pointer to the HW structure
1667  *
1668  *  Acquires the mutex for performing NVM operations.
1669  **/
1670 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1671 {
1672         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1673
1674         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1675
1676         return E1000_SUCCESS;
1677 }
1678
1679 /**
1680  *  e1000_release_nvm_ich8lan - Release NVM mutex
1681  *  @hw: pointer to the HW structure
1682  *
1683  *  Releases the mutex used while performing NVM operations.
1684  **/
1685 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1686 {
1687         DEBUGFUNC("e1000_release_nvm_ich8lan");
1688
1689         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1690
1691         return;
1692 }
1693
1694 /**
1695  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1696  *  @hw: pointer to the HW structure
1697  *
1698  *  Acquires the software control flag for performing PHY and select
1699  *  MAC CSR accesses.
1700  **/
1701 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1702 {
1703         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1704         s32 ret_val = E1000_SUCCESS;
1705
1706         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1707
1708         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1709
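        /* Wait up to PHY_CFG_TIMEOUT ms for any current software owner to
         * release the flag before trying to take it ourselves.
         */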
1710         while (timeout) {
1711                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1712                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1713                         break;
1714
1715                 msec_delay_irq(1);
1716                 timeout--;
1717         }
1718
1719         if (!timeout) {
1720                 DEBUGOUT("SW has already locked the resource.\n");
1721                 ret_val = -E1000_ERR_CONFIG;
1722                 goto out;
1723         }
1724
1725         timeout = SW_FLAG_TIMEOUT;
1726
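        /* Request ownership by setting the SW flag, then wait up to
         * SW_FLAG_TIMEOUT ms for the bit to read back as set; if it never
         * does, FW or HW currently owns the semaphore.
         */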
1727         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1728         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1729
1730         while (timeout) {
1731                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1732                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1733                         break;
1734
1735                 msec_delay_irq(1);
1736                 timeout--;
1737         }
1738
1739         if (!timeout) {
1740                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1741                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1742                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1743                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1744                 ret_val = -E1000_ERR_CONFIG;
1745                 goto out;
1746         }
1747
1748 out:
1749         if (ret_val)
1750                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1751
1752         return ret_val;
1753 }
1754
1755 /**
1756  *  e1000_release_swflag_ich8lan - Release software control flag
1757  *  @hw: pointer to the HW structure
1758  *
1759  *  Releases the software control flag for performing PHY and select
1760  *  MAC CSR accesses.
1761  **/
1762 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1763 {
1764         u32 extcnf_ctrl;
1765
1766         DEBUGFUNC("e1000_release_swflag_ich8lan");
1767
1768         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1769
1770         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1771                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1772                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1773         } else {
1774                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1775         }
1776
1777         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1778
1779         return;
1780 }
1781
1782 /**
1783  *  e1000_check_mng_mode_ich8lan - Checks management mode
1784  *  @hw: pointer to the HW structure
1785  *
1786  *  This checks if the adapter has any manageability enabled.
1787  *  This is a function pointer entry point only called by read/write
1788  *  routines for the PHY and NVM parts.
1789  **/
1790 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1791 {
1792         u32 fwsm;
1793
1794         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1795
1796         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1797
1798         return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
1799                 ((fwsm & E1000_FWSM_MODE_MASK) ==
1800                  (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
1801 }
1802
1803 /**
1804  *  e1000_check_mng_mode_pchlan - Checks management mode
1805  *  @hw: pointer to the HW structure
1806  *
1807  *  This checks if the adapter has iAMT enabled.
1808  *  This is a function pointer entry point only called by read/write
1809  *  routines for the PHY and NVM parts.
1810  **/
1811 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1812 {
1813         u32 fwsm;
1814
1815         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1816
1817         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1818
1819         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1820                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1821 }
1822
1823 /**
1824  *  e1000_rar_set_pch2lan - Set receive address register
1825  *  @hw: pointer to the HW structure
1826  *  @addr: pointer to the receive address
1827  *  @index: receive address array register
1828  *
1829  *  Sets the receive address array register at index to the address passed
1830  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1831  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1832  *  Use SHRA[0-3] in place of those reserved for ME.
1833  **/
1834 STATIC void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1835 {
1836         u32 rar_low, rar_high;
1837
1838         DEBUGFUNC("e1000_rar_set_pch2lan");
1839
1840         /* HW expects these in little endian so we reverse the byte order
1841          * from network order (big endian) to little endian
1842          */
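        /* Illustrative example: a MAC address of 00:AA:BB:CC:DD:EE packs to
         * rar_low = 0xCCBBAA00 and rar_high = 0x0000EEDD (before the AV bit).
         */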
1843         rar_low = ((u32) addr[0] |
1844                    ((u32) addr[1] << 8) |
1845                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1846
1847         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1848
1849         /* If MAC address zero, no need to set the AV bit */
1850         if (rar_low || rar_high)
1851                 rar_high |= E1000_RAH_AV;
1852
1853         if (index == 0) {
1854                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1855                 E1000_WRITE_FLUSH(hw);
1856                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1857                 E1000_WRITE_FLUSH(hw);
1858                 return;
1859         }
1860
1861         /* RAR[1-6] are owned by manageability.  Skip those and program the
1862          * next address into the SHRA register array.
1863          */
1864         if (index < (u32) (hw->mac.rar_entry_count - 6)) {
1865                 s32 ret_val;
1866
1867                 ret_val = e1000_acquire_swflag_ich8lan(hw);
1868                 if (ret_val)
1869                         goto out;
1870
1871                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1872                 E1000_WRITE_FLUSH(hw);
1873                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1874                 E1000_WRITE_FLUSH(hw);
1875
1876                 e1000_release_swflag_ich8lan(hw);
1877
1878                 /* verify the register updates */
1879                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1880                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1881                         return;
1882
1883                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1884                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1885         }
1886
1887 out:
1888         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1889 }
1890
1891 /**
1892  *  e1000_rar_set_pch_lpt - Set receive address registers
1893  *  @hw: pointer to the HW structure
1894  *  @addr: pointer to the receive address
1895  *  @index: receive address array register
1896  *
1897  *  Sets the receive address register array at index to the address passed
1898  *  in by addr. For LPT, RAR[0] is the base address register that is to
1899  *  contain the MAC address. SHRA[0-10] are the shared receive address
1900  *  registers that are shared between the Host and manageability engine (ME).
1901  **/
1902 STATIC void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1903 {
1904         u32 rar_low, rar_high;
1905         u32 wlock_mac;
1906
1907         DEBUGFUNC("e1000_rar_set_pch_lpt");
1908
1909         /* HW expects these in little endian so we reverse the byte order
1910          * from network order (big endian) to little endian
1911          */
1912         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1913                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1914
1915         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1916
1917         /* If MAC address zero, no need to set the AV bit */
1918         if (rar_low || rar_high)
1919                 rar_high |= E1000_RAH_AV;
1920
1921         if (index == 0) {
1922                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1923                 E1000_WRITE_FLUSH(hw);
1924                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1925                 E1000_WRITE_FLUSH(hw);
1926                 return;
1927         }
1928
1929         /* The manageability engine (ME) can lock certain SHRAR registers that
1930          * it is using - those registers are unavailable for use.
1931          */
1932         if (index < hw->mac.rar_entry_count) {
1933                 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1934                             E1000_FWSM_WLOCK_MAC_MASK;
1935                 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1936
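                /* Reading of the checks below: wlock_mac == 1 locks every
                 * SHRA register, wlock_mac == 0 locks none, and any other
                 * value leaves only RAR indexes up to wlock_mac writable.
                 */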
1937                 /* Check if all SHRAR registers are locked */
1938                 if (wlock_mac == 1)
1939                         goto out;
1940
1941                 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1942                         s32 ret_val;
1943
1944                         ret_val = e1000_acquire_swflag_ich8lan(hw);
1945
1946                         if (ret_val)
1947                                 goto out;
1948
1949                         E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1950                                         rar_low);
1951                         E1000_WRITE_FLUSH(hw);
1952                         E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1953                                         rar_high);
1954                         E1000_WRITE_FLUSH(hw);
1955
1956                         e1000_release_swflag_ich8lan(hw);
1957
1958                         /* verify the register updates */
1959                         if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1960                             (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1961                                 return;
1962                 }
1963         }
1964
1965 out:
1966         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1967 }
1968
1969 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1970 /**
1971  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1972  *  @hw: pointer to the HW structure
1973  *  @mc_addr_list: array of multicast addresses to program
1974  *  @mc_addr_count: number of multicast addresses to program
1975  *
1976  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1977  *  The caller must have a packed mc_addr_list of multicast addresses.
1978  **/
1979 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1980                                               u8 *mc_addr_list,
1981                                               u32 mc_addr_count)
1982 {
1983         u16 phy_reg = 0;
1984         int i;
1985         s32 ret_val;
1986
1987         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1988
1989         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1990
1991         ret_val = hw->phy.ops.acquire(hw);
1992         if (ret_val)
1993                 return;
1994
1995         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1996         if (ret_val)
1997                 goto release;
1998
1999         for (i = 0; i < hw->mac.mta_reg_count; i++) {
2000                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2001                                            (u16)(hw->mac.mta_shadow[i] &
2002                                                  0xFFFF));
2003                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2004                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
2005                                                  0xFFFF));
2006         }
2007
2008         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2009
2010 release:
2011         hw->phy.ops.release(hw);
2012 }
2013
2014 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
2015 /**
2016  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2017  *  @hw: pointer to the HW structure
2018  *
2019  *  Checks if firmware is blocking the reset of the PHY.
2020  *  This is a function pointer entry point only called by
2021  *  reset routines.
2022  **/
2023 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2024 {
2025         u32 fwsm;
2026         bool blocked = false;
2027         int i = 0;
2028
2029         DEBUGFUNC("e1000_check_reset_block_ich8lan");
2030
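        /* Poll the RSPCIPHY bit for up to ~100 ms (10 retries x 10 ms)
         * before reporting the PHY reset as blocked by firmware.
         */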
2031         do {
2032                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2033                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2034                         blocked = true;
2035                         msec_delay(10);
2036                         continue;
2037                 }
2038                 blocked = false;
2039         } while (blocked && (i++ < 10));
2040         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2041 }
2042
2043 /**
2044  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2045  *  @hw: pointer to the HW structure
2046  *
2047  *  Assumes semaphore already acquired.
2048  *
2049  **/
2050 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2051 {
2052         u16 phy_data;
2053         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2054         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2055                 E1000_STRAP_SMT_FREQ_SHIFT;
2056         s32 ret_val;
2057
2058         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2059
2060         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2061         if (ret_val)
2062                 return ret_val;
2063
2064         phy_data &= ~HV_SMB_ADDR_MASK;
2065         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2066         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2067
2068         if (hw->phy.type == e1000_phy_i217) {
2069                 /* Restore SMBus frequency */
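                /* The strap field stores the frequency code plus one, so a
                 * raw value of 0 means "not supported"; the freq-- below both
                 * tests for that and leaves the two-bit code to be copied
                 * into HV_SMB_ADDR (editor's reading of the decrement trick).
                 */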
2070                 if (freq--) {
2071                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2072                         phy_data |= (freq & (1 << 0)) <<
2073                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2074                         phy_data |= (freq & (1 << 1)) <<
2075                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2076                 } else {
2077                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
2078                 }
2079         }
2080
2081         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2082 }
2083
2084 /**
2085  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2086  *  @hw:   pointer to the HW structure
2087  *
2088  *  SW should configure the LCD from the NVM extended configuration region
2089  *  as a workaround for certain parts.
2090  **/
2091 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2092 {
2093         struct e1000_phy_info *phy = &hw->phy;
2094         u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2095         s32 ret_val = E1000_SUCCESS;
2096         u16 word_addr, reg_data, reg_addr, phy_page = 0;
2097
2098         DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2099
2100         /* Initialize the PHY from the NVM on ICH platforms.  This
2101          * is needed due to an issue where the NVM configuration is
2102          * not properly autoloaded after power transitions.
2103          * Therefore, after each PHY reset, we will load the
2104          * configuration data out of the NVM manually.
2105          */
2106         switch (hw->mac.type) {
2107         case e1000_ich8lan:
2108                 if (phy->type != e1000_phy_igp_3)
2109                         return ret_val;
2110
2111                 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2112                     (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2113                         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2114                         break;
2115                 }
2116                 /* Fall-thru */
2117         case e1000_pchlan:
2118         case e1000_pch2lan:
2119         case e1000_pch_lpt:
2120                 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2121                 break;
2122         default:
2123                 return ret_val;
2124         }
2125
2126         ret_val = hw->phy.ops.acquire(hw);
2127         if (ret_val)
2128                 return ret_val;
2129
2130         data = E1000_READ_REG(hw, E1000_FEXTNVM);
2131         if (!(data & sw_cfg_mask))
2132                 goto release;
2133
2134         /* Make sure HW does not configure LCD from PHY
2135          * extended configuration before SW configuration
2136          */
2137         data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2138         if ((hw->mac.type < e1000_pch2lan) &&
2139             (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2140                 goto release;
2141
2142         cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2143         cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2144         cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2145         if (!cnf_size)
2146                 goto release;
2147
2148         cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2149         cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2150
2151         if (((hw->mac.type == e1000_pchlan) &&
2152              !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2153             (hw->mac.type > e1000_pchlan)) {
2154                 /* HW configures the SMBus address and LEDs when the
2155                  * OEM and LCD Write Enable bits are set in the NVM.
2156                  * When both NVM bits are cleared, SW will configure
2157                  * them instead.
2158                  */
2159                 ret_val = e1000_write_smbus_addr(hw);
2160                 if (ret_val)
2161                         goto release;
2162
2163                 data = E1000_READ_REG(hw, E1000_LEDCTL);
2164                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2165                                                         (u16)data);
2166                 if (ret_val)
2167                         goto release;
2168         }
2169
2170         /* Configure LCD from extended configuration region. */
2171
2172         /* cnf_base_addr is in DWORD */
2173         word_addr = (u16)(cnf_base_addr << 1);
2174
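        /* Each extended configuration entry is a pair of NVM words: the PHY
         * register data followed by the register address; page-select
         * entries only update phy_page for subsequent writes.
         */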
2175         for (i = 0; i < cnf_size; i++) {
2176                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2177                                            &reg_data);
2178                 if (ret_val)
2179                         goto release;
2180
2181                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2182                                            1, &reg_addr);
2183                 if (ret_val)
2184                         goto release;
2185
2186                 /* Save off the PHY page for future writes. */
2187                 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2188                         phy_page = reg_data;
2189                         continue;
2190                 }
2191
2192                 reg_addr &= PHY_REG_MASK;
2193                 reg_addr |= phy_page;
2194
2195                 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2196                                                     reg_data);
2197                 if (ret_val)
2198                         goto release;
2199         }
2200
2201 release:
2202         hw->phy.ops.release(hw);
2203         return ret_val;
2204 }
2205
2206 /**
2207  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2208  *  @hw:   pointer to the HW structure
2209  *  @link: link up bool flag
2210  *
2211  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2212  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2213  *  If link is down, the function will restore the default K1 setting located
2214  *  in the NVM.
2215  **/
2216 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2217 {
2218         s32 ret_val = E1000_SUCCESS;
2219         u16 status_reg = 0;
2220         bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2221
2222         DEBUGFUNC("e1000_k1_gig_workaround_hv");
2223
2224         if (hw->mac.type != e1000_pchlan)
2225                 return E1000_SUCCESS;
2226
2227         /* Wrap the whole flow with the sw flag */
2228         ret_val = hw->phy.ops.acquire(hw);
2229         if (ret_val)
2230                 return ret_val;
2231
2232         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2233         if (link) {
2234                 if (hw->phy.type == e1000_phy_82578) {
2235                         ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2236                                                               &status_reg);
2237                         if (ret_val)
2238                                 goto release;
2239
2240                         status_reg &= (BM_CS_STATUS_LINK_UP |
2241                                        BM_CS_STATUS_RESOLVED |
2242                                        BM_CS_STATUS_SPEED_MASK);
2243
2244                         if (status_reg == (BM_CS_STATUS_LINK_UP |
2245                                            BM_CS_STATUS_RESOLVED |
2246                                            BM_CS_STATUS_SPEED_1000))
2247                                 k1_enable = false;
2248                 }
2249
2250                 if (hw->phy.type == e1000_phy_82577) {
2251                         ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2252                                                               &status_reg);
2253                         if (ret_val)
2254                                 goto release;
2255
2256                         status_reg &= (HV_M_STATUS_LINK_UP |
2257                                        HV_M_STATUS_AUTONEG_COMPLETE |
2258                                        HV_M_STATUS_SPEED_MASK);
2259
2260                         if (status_reg == (HV_M_STATUS_LINK_UP |
2261                                            HV_M_STATUS_AUTONEG_COMPLETE |
2262                                            HV_M_STATUS_SPEED_1000))
2263                                 k1_enable = false;
2264                 }
2265
2266                 /* Link stall fix for link up */
2267                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2268                                                        0x0100);
2269                 if (ret_val)
2270                         goto release;
2271
2272         } else {
2273                 /* Link stall fix for link down */
2274                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2275                                                        0x4100);
2276                 if (ret_val)
2277                         goto release;
2278         }
2279
2280         ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2281
2282 release:
2283         hw->phy.ops.release(hw);
2284
2285         return ret_val;
2286 }
2287
2288 /**
2289  *  e1000_configure_k1_ich8lan - Configure K1 power state
2290  *  @hw: pointer to the HW structure
2291  *  @k1_enable: K1 state to configure
2292  *
2293  *  Configure the K1 power state based on the provided parameter.
2294  *  Assumes semaphore already acquired.
2295  *
2296  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2297  **/
2298 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2299 {
2300         s32 ret_val;
2301         u32 ctrl_reg = 0;
2302         u32 ctrl_ext = 0;
2303         u32 reg = 0;
2304         u16 kmrn_reg = 0;
2305
2306         DEBUGFUNC("e1000_configure_k1_ich8lan");
2307
2308         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2309                                              &kmrn_reg);
2310         if (ret_val)
2311                 return ret_val;
2312
2313         if (k1_enable)
2314                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2315         else
2316                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2317
2318         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2319                                               kmrn_reg);
2320         if (ret_val)
2321                 return ret_val;
2322
2323         usec_delay(20);
2324         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2325         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2326
2327         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2328         reg |= E1000_CTRL_FRCSPD;
2329         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2330
2331         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2332         E1000_WRITE_FLUSH(hw);
2333         usec_delay(20);
2334         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2335         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2336         E1000_WRITE_FLUSH(hw);
2337         usec_delay(20);
2338
2339         return E1000_SUCCESS;
2340 }
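
/* Illustrative call pattern (editor's sketch, mirroring
 * e1000_k1_gig_workaround_hv above): the PHY semaphore must already be held
 * when calling e1000_configure_k1_ich8lan(), e.g.
 *
 *	ret_val = hw->phy.ops.acquire(hw);
 *	if (!ret_val) {
 *		ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
 *		hw->phy.ops.release(hw);
 *	}
 *
 * where k1_enable is the caller's desired K1 state.
 */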
2341
2342 /**
2343  *  e1000_oem_bits_config_ich8lan - Configure OEM (Gbe Disable, LPLU) bits
2344  *  @hw:       pointer to the HW structure
2345  *  @d0_state: boolean if entering d0 or d3 device state
2346  *
2347  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2348  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2349  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2350  **/
2351 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2352 {
2353         s32 ret_val = 0;
2354         u32 mac_reg;
2355         u16 oem_reg;
2356
2357         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2358
2359         if (hw->mac.type < e1000_pchlan)
2360                 return ret_val;
2361
2362         ret_val = hw->phy.ops.acquire(hw);
2363         if (ret_val)
2364                 return ret_val;
2365
2366         if (hw->mac.type == e1000_pchlan) {
2367                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2368                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2369                         goto release;
2370         }
2371
2372         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2373         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2374                 goto release;
2375
2376         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2377
2378         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2379         if (ret_val)
2380                 goto release;
2381
2382         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2383
2384         if (d0_state) {
2385                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2386                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2387
2388                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2389                         oem_reg |= HV_OEM_BITS_LPLU;
2390         } else {
2391                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2392                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2393                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2394
2395                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2396                     E1000_PHY_CTRL_NOND0A_LPLU))
2397                         oem_reg |= HV_OEM_BITS_LPLU;
2398         }
2399
2400         /* Set Restart auto-neg to activate the bits */
2401         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2402             !hw->phy.ops.check_reset_block(hw))
2403                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2404
2405         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2406
2407 release:
2408         hw->phy.ops.release(hw);
2409
2410         return ret_val;
2411 }
2412
2413
2414 /**
2415  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2416  *  @hw:   pointer to the HW structure
2417  **/
2418 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2419 {
2420         s32 ret_val;
2421         u16 data;
2422
2423         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2424
2425         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2426         if (ret_val)
2427                 return ret_val;
2428
2429         data |= HV_KMRN_MDIO_SLOW;
2430
2431         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2432
2433         return ret_val;
2434 }
2435
2436 /**
2437  *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2438  *  done after every PHY reset.
 *  @hw: pointer to the HW structure
2439  **/
2440 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2441 {
2442         s32 ret_val = E1000_SUCCESS;
2443         u16 phy_data;
2444
2445         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2446
2447         if (hw->mac.type != e1000_pchlan)
2448                 return E1000_SUCCESS;
2449
2450         /* Set MDIO slow mode before any other MDIO access */
2451         if (hw->phy.type == e1000_phy_82577) {
2452                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2453                 if (ret_val)
2454                         return ret_val;
2455         }
2456
2457         if (((hw->phy.type == e1000_phy_82577) &&
2458              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2459             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2460                 /* Disable generation of early preamble */
2461                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2462                 if (ret_val)
2463                         return ret_val;
2464
2465                 /* Preamble tuning for SSC */
2466                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2467                                                 0xA204);
2468                 if (ret_val)
2469                         return ret_val;
2470         }
2471
2472         if (hw->phy.type == e1000_phy_82578) {
2473                 /* Return registers to default by doing a soft reset then
2474                  * writing 0x3140 to the control register.
2475                  */
2476                 if (hw->phy.revision < 2) {
2477                         e1000_phy_sw_reset_generic(hw);
2478                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2479                                                         0x3140);
2480                 }
2481         }
2482
2483         /* Select page 0 */
2484         ret_val = hw->phy.ops.acquire(hw);
2485         if (ret_val)
2486                 return ret_val;
2487
2488         hw->phy.addr = 1;
2489         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2490         hw->phy.ops.release(hw);
2491         if (ret_val)
2492                 return ret_val;
2493
2494         /* Configure the K1 Si workaround during phy reset assuming there is
2495          * link so that it disables K1 if link is at 1Gbps.
2496          */
2497         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2498         if (ret_val)
2499                 return ret_val;
2500
2501         /* Workaround for link disconnects on a busy hub in half duplex */
2502         ret_val = hw->phy.ops.acquire(hw);
2503         if (ret_val)
2504                 return ret_val;
2505         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2506         if (ret_val)
2507                 goto release;
2508         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2509                                                phy_data & 0x00FF);
2510         if (ret_val)
2511                 goto release;
2512
2513         /* set MSE higher to enable link to stay up when noise is high */
2514         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2515 release:
2516         hw->phy.ops.release(hw);
2517
2518         return ret_val;
2519 }
2520
2521 /**
2522  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2523  *  @hw:   pointer to the HW structure
2524  **/
2525 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2526 {
2527         u32 mac_reg;
2528         u16 i, phy_reg = 0;
2529         s32 ret_val;
2530
2531         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2532
2533         ret_val = hw->phy.ops.acquire(hw);
2534         if (ret_val)
2535                 return;
2536         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2537         if (ret_val)
2538                 goto release;
2539
2540         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2541         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2542                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2543                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2544                                            (u16)(mac_reg & 0xFFFF));
2545                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2546                                            (u16)((mac_reg >> 16) & 0xFFFF));
2547
2548                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2549                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2550                                            (u16)(mac_reg & 0xFFFF));
2551                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2552                                            (u16)((mac_reg & E1000_RAH_AV)
2553                                                  >> 16));
2554         }
2555
2556         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2557
2558 release:
2559         hw->phy.ops.release(hw);
2560 }
2561
2562 #ifndef CRC32_OS_SUPPORT
2563 static u32 e1000_calc_rx_da_crc(u8 mac[])
2564 {
2565         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2566         u32 i, j, mask, crc;
2567
2568         DEBUGFUNC("e1000_calc_rx_da_crc");
2569
2570         crc = 0xffffffff;
2571         for (i = 0; i < 6; i++) {
2572                 crc = crc ^ mac[i];
2573                 for (j = 8; j > 0; j--) {
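                        /* mask is all ones when the CRC's low bit is set and
                         * zero otherwise, XORing in the reflected polynomial
                         * without a branch (bit-at-a-time CRC-32).
                         */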
2574                         mask = (crc & 1) * (-1);
2575                         crc = (crc >> 1) ^ (poly & mask);
2576                 }
2577         }
2578         return ~crc;
2579 }
2580
2581 #endif /* CRC32_OS_SUPPORT */
2582 /**
2583  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2584  *  with 82579 PHY
2585  *  @hw: pointer to the HW structure
2586  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2587  **/
2588 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2589 {
2590         s32 ret_val = E1000_SUCCESS;
2591         u16 phy_reg, data;
2592         u32 mac_reg;
2593         u16 i;
2594
2595         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2596
2597         if (hw->mac.type < e1000_pch2lan)
2598                 return E1000_SUCCESS;
2599
2600         /* disable Rx path while enabling/disabling workaround */
2601         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2602         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2603                                         phy_reg | (1 << 14));
2604         if (ret_val)
2605                 return ret_val;
2606
2607         if (enable) {
2608                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2609                  * SHRAL/H) and initial CRC values to the MAC
2610                  */
2611                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2612                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2613                         u32 addr_high, addr_low;
2614
2615                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2616                         if (!(addr_high & E1000_RAH_AV))
2617                                 continue;
2618                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2619                         mac_addr[0] = (addr_low & 0xFF);
2620                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2621                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2622                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2623                         mac_addr[4] = (addr_high & 0xFF);
2624                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2625
2626 #ifndef CRC32_OS_SUPPORT
2627                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2628                                         e1000_calc_rx_da_crc(mac_addr));
2629 #else /* CRC32_OS_SUPPORT */
2630                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2631                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2632 #endif /* CRC32_OS_SUPPORT */
2633                 }
2634
2635                 /* Write Rx addresses to the PHY */
2636                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2637
2638                 /* Enable jumbo frame workaround in the MAC */
2639                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2640                 mac_reg &= ~(1 << 14);
2641                 mac_reg |= (7 << 15);
2642                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2643
2644                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2645                 mac_reg |= E1000_RCTL_SECRC;
2646                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2647
2648                 ret_val = e1000_read_kmrn_reg_generic(hw,
2649                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2650                                                 &data);
2651                 if (ret_val)
2652                         return ret_val;
2653                 ret_val = e1000_write_kmrn_reg_generic(hw,
2654                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2655                                                 data | (1 << 0));
2656                 if (ret_val)
2657                         return ret_val;
2658                 ret_val = e1000_read_kmrn_reg_generic(hw,
2659                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2660                                                 &data);
2661                 if (ret_val)
2662                         return ret_val;
2663                 data &= ~(0xF << 8);
2664                 data |= (0xB << 8);
2665                 ret_val = e1000_write_kmrn_reg_generic(hw,
2666                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2667                                                 data);
2668                 if (ret_val)
2669                         return ret_val;
2670
2671                 /* Enable jumbo frame workaround in the PHY */
2672                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2673                 data &= ~(0x7F << 5);
2674                 data |= (0x37 << 5);
2675                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2676                 if (ret_val)
2677                         return ret_val;
2678                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2679                 data &= ~(1 << 13);
2680                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2681                 if (ret_val)
2682                         return ret_val;
2683                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2684                 data &= ~(0x3FF << 2);
2685                 data |= (0x1A << 2);
2686                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2687                 if (ret_val)
2688                         return ret_val;
2689                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2690                 if (ret_val)
2691                         return ret_val;
2692                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2693                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2694                                                 (1 << 10));
2695                 if (ret_val)
2696                         return ret_val;
2697         } else {
2698                 /* Write MAC register values back to h/w defaults */
2699                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2700                 mac_reg &= ~(0xF << 14);
2701                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2702
2703                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2704                 mac_reg &= ~E1000_RCTL_SECRC;
2705                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2706
2707                 ret_val = e1000_read_kmrn_reg_generic(hw,
2708                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2709                                                 &data);
2710                 if (ret_val)
2711                         return ret_val;
2712                 ret_val = e1000_write_kmrn_reg_generic(hw,
2713                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2714                                                 data & ~(1 << 0));
2715                 if (ret_val)
2716                         return ret_val;
2717                 ret_val = e1000_read_kmrn_reg_generic(hw,
2718                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2719                                                 &data);
2720                 if (ret_val)
2721                         return ret_val;
2722                 data &= ~(0xF << 8);
2723                 data |= (0xB << 8);
2724                 ret_val = e1000_write_kmrn_reg_generic(hw,
2725                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2726                                                 data);
2727                 if (ret_val)
2728                         return ret_val;
2729
2730                 /* Write PHY register values back to h/w defaults */
2731                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2732                 data &= ~(0x7F << 5);
2733                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2734                 if (ret_val)
2735                         return ret_val;
2736                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2737                 data |= (1 << 13);
2738                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2739                 if (ret_val)
2740                         return ret_val;
2741                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2742                 data &= ~(0x3FF << 2);
2743                 data |= (0x8 << 2);
2744                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2745                 if (ret_val)
2746                         return ret_val;
2747                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2748                 if (ret_val)
2749                         return ret_val;
2750                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2751                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2752                                                 ~(1 << 10));
2753                 if (ret_val)
2754                         return ret_val;
2755         }
2756
2757         /* re-enable Rx path after enabling/disabling workaround */
2758         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2759                                      ~(1 << 14));
2760 }
2761
2762 /**
2763  *  e1000_lv_phy_workarounds_ich8lan - PHY workarounds done after every PHY reset
2764  *  @hw: pointer to the HW structure
2765  **/
2766 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2767 {
2768         s32 ret_val = E1000_SUCCESS;
2769
2770         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2771
2772         if (hw->mac.type != e1000_pch2lan)
2773                 return E1000_SUCCESS;
2774
2775         /* Set MDIO slow mode before any other MDIO access */
2776         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2777         if (ret_val)
2778                 return ret_val;
2779
2780         ret_val = hw->phy.ops.acquire(hw);
2781         if (ret_val)
2782                 return ret_val;
2783         /* set MSE higher to enable link to stay up when noise is high */
2784         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2785         if (ret_val)
2786                 goto release;
2787         /* drop link after 5 times MSE threshold was reached */
2788         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2789 release:
2790         hw->phy.ops.release(hw);
2791
2792         return ret_val;
2793 }
2794
2795 /**
2796  *  e1000_k1_workaround_lv - K1 Si workaround
2797  *  @hw:   pointer to the HW structure
2798  *
2799  *  Workaround to set the K1 beacon duration for 82579 parts
2800  **/
2801 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2802 {
2803         s32 ret_val = E1000_SUCCESS;
2804         u16 status_reg = 0;
2805         u32 mac_reg;
2806         u16 phy_reg;
2807
2808         DEBUGFUNC("e1000_k1_workaround_lv");
2809
2810         if (hw->mac.type != e1000_pch2lan)
2811                 return E1000_SUCCESS;
2812
2813         /* Set K1 beacon duration based on 1Gbps speed or otherwise */
2814         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2815         if (ret_val)
2816                 return ret_val;
2817
2818         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2819             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2820                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2821                 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2822
2823                 ret_val = hw->phy.ops.read_reg(hw, I82579_LPI_CTRL, &phy_reg);
2824                 if (ret_val)
2825                         return ret_val;
2826
2827                 if (status_reg & HV_M_STATUS_SPEED_1000) {
2828                         u16 pm_phy_reg;
2829
2830                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
2831                         phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2832                         /* LV 1 Gbps packet drop issue workaround */
2833                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2834                                                        &pm_phy_reg);
2835                         if (ret_val)
2836                                 return ret_val;
2837                         pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2838                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2839                                                         pm_phy_reg);
2840                         if (ret_val)
2841                                 return ret_val;
2842                 } else {
2843                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2844                         phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2845                 }
2846                 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2847                 ret_val = hw->phy.ops.write_reg(hw, I82579_LPI_CTRL, phy_reg);
2848         }
2849
2850         return ret_val;
2851 }
2852
2853 /**
2854  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2855  *  @hw:   pointer to the HW structure
2856  *  @gate: boolean set to true to gate, false to ungate
2857  *
2858  *  Gate/ungate the automatic PHY configuration via hardware; perform
2859  *  the configuration via software instead.
2860  **/
2861 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2862 {
2863         u32 extcnf_ctrl;
2864
2865         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2866
2867         if (hw->mac.type < e1000_pch2lan)
2868                 return;
2869
2870         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2871
2872         if (gate)
2873                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2874         else
2875                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2876
2877         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2878 }
2879
2880 /**
2881  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2882  *  @hw: pointer to the HW structure
2883  *
2884  *  Check the appropriate indication that the MAC has finished configuring the
2885  *  PHY after a software reset.
2886  **/
2887 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2888 {
2889         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2890
2891         DEBUGFUNC("e1000_lan_init_done_ich8lan");
2892
2893         /* Wait for basic configuration to complete before proceeding */
2894         do {
2895                 data = E1000_READ_REG(hw, E1000_STATUS);
2896                 data &= E1000_STATUS_LAN_INIT_DONE;
2897                 usec_delay(100);
2898         } while ((!data) && --loop);
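        /* With a 100 usec delay per iteration, the wait above is bounded by
         * roughly E1000_ICH8_LAN_INIT_TIMEOUT * 100 usec.
         */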
2899
2900         /* If basic configuration is still not complete when the above
2901          * loop count reaches 0, loading the configuration from NVM will
2902          * leave the PHY in a bad state, possibly resulting in no link.
2903          */
2904         if (loop == 0)
2905                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2906
2907         /* Clear the Init Done bit for the next init event */
2908         data = E1000_READ_REG(hw, E1000_STATUS);
2909         data &= ~E1000_STATUS_LAN_INIT_DONE;
2910         E1000_WRITE_REG(hw, E1000_STATUS, data);
2911 }
2912
2913 /**
2914  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2915  *  @hw: pointer to the HW structure
2916  **/
2917 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2918 {
2919         s32 ret_val = E1000_SUCCESS;
2920         u16 reg;
2921
2922         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2923
2924         if (hw->phy.ops.check_reset_block(hw))
2925                 return E1000_SUCCESS;
2926
2927         /* Allow time for h/w to get to quiescent state after reset */
2928         msec_delay(10);
2929
2930         /* Perform any necessary post-reset workarounds */
2931         switch (hw->mac.type) {
2932         case e1000_pchlan:
2933                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2934                 if (ret_val)
2935                         return ret_val;
2936                 break;
2937         case e1000_pch2lan:
2938                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2939                 if (ret_val)
2940                         return ret_val;
2941                 break;
2942         default:
2943                 break;
2944         }
2945
2946         /* Clear the host wakeup bit after lcd reset */
2947         if (hw->mac.type >= e1000_pchlan) {
2948                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2949                 reg &= ~BM_WUC_HOST_WU_BIT;
2950                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2951         }
2952
2953         /* Configure the LCD with the extended configuration region in NVM */
2954         ret_val = e1000_sw_lcd_config_ich8lan(hw);
2955         if (ret_val)
2956                 return ret_val;
2957
2958         /* Configure the LCD with the OEM bits in NVM */
2959         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2960
2961         if (hw->mac.type == e1000_pch2lan) {
2962                 /* Ungate automatic PHY configuration on non-managed 82579 */
2963                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2964                     E1000_ICH_FWSM_FW_VALID)) {
2965                         msec_delay(10);
2966                         e1000_gate_hw_phy_config_ich8lan(hw, false);
2967                 }
2968
2969                 /* Set EEE LPI Update Timer to 200usec */
2970                 ret_val = hw->phy.ops.acquire(hw);
2971                 if (ret_val)
2972                         return ret_val;
2973                 ret_val = e1000_write_emi_reg_locked(hw,
2974                                                      I82579_LPI_UPDATE_TIMER,
2975                                                      0x1387);
2976                 hw->phy.ops.release(hw);
2977         }
2978
2979         return ret_val;
2980 }
2981
2982 /**
2983  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2984  *  @hw: pointer to the HW structure
2985  *
2986  *  Resets the PHY
2987  *  This is a function pointer entry point called by drivers
2988  *  or other shared routines.
2989  **/
2990 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2991 {
2992         s32 ret_val = E1000_SUCCESS;
2993
2994         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2995
2996         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2997         if ((hw->mac.type == e1000_pch2lan) &&
2998             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2999                 e1000_gate_hw_phy_config_ich8lan(hw, true);
3000
3001         ret_val = e1000_phy_hw_reset_generic(hw);
3002         if (ret_val)
3003                 return ret_val;
3004
3005         return e1000_post_phy_reset_ich8lan(hw);
3006 }
3007
3008 /**
3009  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3010  *  @hw: pointer to the HW structure
3011  *  @active: true to enable LPLU, false to disable
3012  *
3013  *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
3014  *  write bits are disabled in the NVM, writing the LPLU bits in the MAC will
3015  *  not set the PHY speed. This function manually sets the LPLU bit and
3016  *  restarts auto-negotiation as the hardware would. D3 and D0 LPLU call the
3017  *  same function since they configure the same bit.
3018  **/
3019 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3020 {
3021         s32 ret_val;
3022         u16 oem_reg;
3023
3024         DEBUGFUNC("e1000_set_lplu_state_pchlan");
3025
3026         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3027         if (ret_val)
3028                 return ret_val;
3029
3030         if (active)
3031                 oem_reg |= HV_OEM_BITS_LPLU;
3032         else
3033                 oem_reg &= ~HV_OEM_BITS_LPLU;
3034
3035         if (!hw->phy.ops.check_reset_block(hw))
3036                 oem_reg |= HV_OEM_BITS_RESTART_AN;
3037
3038         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3039 }
3040
3041 /**
3042  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3043  *  @hw: pointer to the HW structure
3044  *  @active: true to enable LPLU, false to disable
3045  *
3046  *  Sets the LPLU D0 state according to the active flag.  When
3047  *  activating LPLU this function also disables smart speed
3048  *  and vice versa.  LPLU will not be activated unless the
3049  *  device autonegotiation advertisement meets standards of
3050  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3051  *  This is a function pointer entry point only called by
3052  *  PHY setup routines.
3053  **/
3054 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3055 {
3056         struct e1000_phy_info *phy = &hw->phy;
3057         u32 phy_ctrl;
3058         s32 ret_val = E1000_SUCCESS;
3059         u16 data;
3060
3061         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3062
3063         if (phy->type == e1000_phy_ife)
3064                 return E1000_SUCCESS;
3065
3066         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3067
3068         if (active) {
3069                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3070                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3071
3072                 if (phy->type != e1000_phy_igp_3)
3073                         return E1000_SUCCESS;
3074
3075                 /* Call gig speed drop workaround on LPLU before accessing
3076                  * any PHY registers
3077                  */
3078                 if (hw->mac.type == e1000_ich8lan)
3079                         e1000_gig_downshift_workaround_ich8lan(hw);
3080
3081                 /* When LPLU is enabled, we should disable SmartSpeed */
3082                 ret_val = phy->ops.read_reg(hw,
3083                                             IGP01E1000_PHY_PORT_CONFIG,
3084                                             &data);
3085                 if (ret_val)
3086                         return ret_val;
3087                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3088                 ret_val = phy->ops.write_reg(hw,
3089                                              IGP01E1000_PHY_PORT_CONFIG,
3090                                              data);
3091                 if (ret_val)
3092                         return ret_val;
3093         } else {
3094                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3095                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3096
3097                 if (phy->type != e1000_phy_igp_3)
3098                         return E1000_SUCCESS;
3099
3100                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3101                  * during Dx states where the power conservation is most
3102                  * important.  During driver activity we should enable
3103                  * SmartSpeed, so performance is maintained.
3104                  */
3105                 if (phy->smart_speed == e1000_smart_speed_on) {
3106                         ret_val = phy->ops.read_reg(hw,
3107                                                     IGP01E1000_PHY_PORT_CONFIG,
3108                                                     &data);
3109                         if (ret_val)
3110                                 return ret_val;
3111
3112                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3113                         ret_val = phy->ops.write_reg(hw,
3114                                                      IGP01E1000_PHY_PORT_CONFIG,
3115                                                      data);
3116                         if (ret_val)
3117                                 return ret_val;
3118                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3119                         ret_val = phy->ops.read_reg(hw,
3120                                                     IGP01E1000_PHY_PORT_CONFIG,
3121                                                     &data);
3122                         if (ret_val)
3123                                 return ret_val;
3124
3125                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3126                         ret_val = phy->ops.write_reg(hw,
3127                                                      IGP01E1000_PHY_PORT_CONFIG,
3128                                                      data);
3129                         if (ret_val)
3130                                 return ret_val;
3131                 }
3132         }
3133
3134         return E1000_SUCCESS;
3135 }
3136
3137 /**
3138  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3139  *  @hw: pointer to the HW structure
3140  *  @active: true to enable LPLU, false to disable
3141  *
3142  *  Sets the LPLU D3 state according to the active flag.  When
3143  *  activating LPLU this function also disables smart speed
3144  *  and vice versa.  LPLU will not be activated unless the
3145  *  device autonegotiation advertisement meets standards of
3146  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3147  *  This is a function pointer entry point only called by
3148  *  PHY setup routines.
3149  **/
3150 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3151 {
3152         struct e1000_phy_info *phy = &hw->phy;
3153         u32 phy_ctrl;
3154         s32 ret_val = E1000_SUCCESS;
3155         u16 data;
3156
3157         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3158
3159         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3160
3161         if (!active) {
3162                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3163                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3164
3165                 if (phy->type != e1000_phy_igp_3)
3166                         return E1000_SUCCESS;
3167
3168                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3169                  * during Dx states where the power conservation is most
3170                  * important.  During driver activity we should enable
3171                  * SmartSpeed, so performance is maintained.
3172                  */
3173                 if (phy->smart_speed == e1000_smart_speed_on) {
3174                         ret_val = phy->ops.read_reg(hw,
3175                                                     IGP01E1000_PHY_PORT_CONFIG,
3176                                                     &data);
3177                         if (ret_val)
3178                                 return ret_val;
3179
3180                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3181                         ret_val = phy->ops.write_reg(hw,
3182                                                      IGP01E1000_PHY_PORT_CONFIG,
3183                                                      data);
3184                         if (ret_val)
3185                                 return ret_val;
3186                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3187                         ret_val = phy->ops.read_reg(hw,
3188                                                     IGP01E1000_PHY_PORT_CONFIG,
3189                                                     &data);
3190                         if (ret_val)
3191                                 return ret_val;
3192
3193                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3194                         ret_val = phy->ops.write_reg(hw,
3195                                                      IGP01E1000_PHY_PORT_CONFIG,
3196                                                      data);
3197                         if (ret_val)
3198                                 return ret_val;
3199                 }
3200         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3201                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3202                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3203                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3204                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3205
3206                 if (phy->type != e1000_phy_igp_3)
3207                         return E1000_SUCCESS;
3208
3209                 /* Call gig speed drop workaround on LPLU before accessing
3210                  * any PHY registers
3211                  */
3212                 if (hw->mac.type == e1000_ich8lan)
3213                         e1000_gig_downshift_workaround_ich8lan(hw);
3214
3215                 /* When LPLU is enabled, we should disable SmartSpeed */
3216                 ret_val = phy->ops.read_reg(hw,
3217                                             IGP01E1000_PHY_PORT_CONFIG,
3218                                             &data);
3219                 if (ret_val)
3220                         return ret_val;
3221
3222                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3223                 ret_val = phy->ops.write_reg(hw,
3224                                              IGP01E1000_PHY_PORT_CONFIG,
3225                                              data);
3226         }
3227
3228         return ret_val;
3229 }
3230
3231 /**
3232  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3233  *  @hw: pointer to the HW structure
3234  *  @bank:  pointer to the variable that returns the active bank
3235  *
3236  *  Reads signature byte from the NVM using the flash access registers.
3237  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3238  **/
3239 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3240 {
3241         u32 eecd;
3242         struct e1000_nvm_info *nvm = &hw->nvm;
3243         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3244         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
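        /* act_offset is the byte address of the signature word's high byte:
         * two bytes per NVM word, plus one to select the upper byte that
         * holds the signature bits 15:14.
         */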
3245         u8 sig_byte = 0;
3246         s32 ret_val;
3247
3248         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3249
3250         switch (hw->mac.type) {
3251         case e1000_ich8lan:
3252         case e1000_ich9lan:
3253                 eecd = E1000_READ_REG(hw, E1000_EECD);
3254                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3255                     E1000_EECD_SEC1VAL_VALID_MASK) {
3256                         if (eecd & E1000_EECD_SEC1VAL)
3257                                 *bank = 1;
3258                         else
3259                                 *bank = 0;
3260
3261                         return E1000_SUCCESS;
3262                 }
3263                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3264                 /* fall-thru */
3265         default:
3266                 /* set bank to 0 in case flash read fails */
3267                 *bank = 0;
3268
3269                 /* Check bank 0 */
3270                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3271                                                         &sig_byte);
3272                 if (ret_val)
3273                         return ret_val;
3274                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3275                     E1000_ICH_NVM_SIG_VALUE) {
3276                         *bank = 0;
3277                         return E1000_SUCCESS;
3278                 }
3279
3280                 /* Check bank 1 */
3281                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3282                                                         bank1_offset,
3283                                                         &sig_byte);
3284                 if (ret_val)
3285                         return ret_val;
3286                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3287                     E1000_ICH_NVM_SIG_VALUE) {
3288                         *bank = 1;
3289                         return E1000_SUCCESS;
3290                 }
3291
3292                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3293                 return -E1000_ERR_NVM;
3294         }
3295 }
3296
3297 /**
3298  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3299  *  @hw: pointer to the HW structure
3300  *  @offset: The offset (in bytes) of the word(s) to read.
3301  *  @words: Size of data to read in words
3302  *  @data: Pointer to the word(s) to read at offset.
3303  *
3304  *  Reads a word(s) from the NVM using the flash access registers.
3305  **/
3306 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3307                                   u16 *data)
3308 {
3309         struct e1000_nvm_info *nvm = &hw->nvm;
3310         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3311         u32 act_offset;
3312         s32 ret_val = E1000_SUCCESS;
3313         u32 bank = 0;
3314         u16 i, word;
3315
3316         DEBUGFUNC("e1000_read_nvm_ich8lan");
3317
3318         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3319             (words == 0)) {
3320                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3321                 ret_val = -E1000_ERR_NVM;
3322                 goto out;
3323         }
3324
3325         nvm->ops.acquire(hw);
3326
3327         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3328         if (ret_val != E1000_SUCCESS) {
3329                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3330                 bank = 0;
3331         }
3332
3333         act_offset = (bank) ? nvm->flash_bank_size : 0;
3334         act_offset += offset;
3335
3336         ret_val = E1000_SUCCESS;
3337         for (i = 0; i < words; i++) {
3338                 if (dev_spec->shadow_ram[offset+i].modified) {
3339                         data[i] = dev_spec->shadow_ram[offset+i].value;
3340                 } else {
3341                         ret_val = e1000_read_flash_word_ich8lan(hw,
3342                                                                 act_offset + i,
3343                                                                 &word);
3344                         if (ret_val)
3345                                 break;
3346                         data[i] = word;
3347                 }
3348         }
3349
3350         nvm->ops.release(hw);
3351
3352 out:
3353         if (ret_val)
3354                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3355
3356         return ret_val;
3357 }
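/* Illustrative usage only (not part of the driver): callers normally reach
 * this routine through the NVM ops table rather than calling it directly,
 * for example:
 *
 *	u16 word;
 *	s32 ret = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, &word);
 *	if (ret)
 *		DEBUGOUT("NVM Read Error\n");
 */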
3358
3359 /**
3360  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3361  *  @hw: pointer to the HW structure
3362  *
3363  *  This function does initial flash setup so that a new read/write/erase cycle
3364  *  can be started.
3365  **/
3366 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3367 {
3368         union ich8_hws_flash_status hsfsts;
3369         s32 ret_val = -E1000_ERR_NVM;
3370
3371         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3372
3373         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3374
3375         /* Check if the flash descriptor is valid */
3376         if (!hsfsts.hsf_status.fldesvalid) {
3377                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3378                 return -E1000_ERR_NVM;
3379         }
3380
3381         /* Clear FCERR and DAEL in hw status by writing 1 */
3382         hsfsts.hsf_status.flcerr = 1;
3383         hsfsts.hsf_status.dael = 1;
3384
3385         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3386
3387         /* Either the hardware should expose a SPI cycle-in-progress
3388          * bit that can be checked before starting a new cycle, or the
3389          * FDONE bit should be set to 1 by a hardware reset so that
3390          * it can be used as an indication of whether a cycle is in
3391          * progress or has completed.
3392          */
3394
3395         if (!hsfsts.hsf_status.flcinprog) {
3396                 /* There is no cycle running at present,
3397                  * so we can start a cycle.
3398                  * Begin by setting Flash Cycle Done.
3399                  */
3400                 hsfsts.hsf_status.flcdone = 1;
3401                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3402                 ret_val = E1000_SUCCESS;
3403         } else {
3404                 s32 i;
3405
3406                 /* Otherwise poll for sometime so the current
3407                  * cycle has a chance to end before giving up.
3408                  */
3409                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3410                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3411                                                               ICH_FLASH_HSFSTS);
3412                         if (!hsfsts.hsf_status.flcinprog) {
3413                                 ret_val = E1000_SUCCESS;
3414                                 break;
3415                         }
3416                         usec_delay(1);
3417                 }
3418                 if (ret_val == E1000_SUCCESS) {
3419                         /* The previous cycle completed within the timeout,
3420                          * so now set the Flash Cycle Done.
3421                          */
3422                         hsfsts.hsf_status.flcdone = 1;
3423                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3424                                                 hsfsts.regval);
3425                 } else {
3426                         DEBUGOUT("Flash controller busy, cannot get access\n");
3427                 }
3428         }
3429
3430         return ret_val;
3431 }
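/* The flash access helpers in this file follow the hardware-sequencing
 * protocol sketched below:
 *   1. e1000_flash_cycle_init_ich8lan() clears FCERR/DAEL and makes sure no
 *      cycle is already in progress.
 *   2. The caller programs the data byte count and cycle type in HSFCTL, the
 *      linear address in FADDR, and (for writes) the data in FDATA0.
 *   3. e1000_flash_cycle_ich8lan() sets FLCGO and polls FLCDONE until the
 *      cycle completes or the timeout expires.
 */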
3432
3433 /**
3434  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3435  *  @hw: pointer to the HW structure
3436  *  @timeout: maximum time to wait for completion
3437  *
3438  *  This function starts a flash cycle and waits for its completion.
3439  **/
3440 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3441 {
3442         union ich8_hws_flash_ctrl hsflctl;
3443         union ich8_hws_flash_status hsfsts;
3444         u32 i = 0;
3445
3446         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3447
3448         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3449         hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3450         hsflctl.hsf_ctrl.flcgo = 1;
3451         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3452
3453         /* wait until the FDONE bit is set to 1 */
3454         do {
3455                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3456                 if (hsfsts.hsf_status.flcdone)
3457                         break;
3458                 usec_delay(1);
3459         } while (i++ < timeout);
3460
3461         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3462                 return E1000_SUCCESS;
3463
3464         return -E1000_ERR_NVM;
3465 }
3466
3467 /**
3468  *  e1000_read_flash_word_ich8lan - Read word from flash
3469  *  @hw: pointer to the HW structure
3470  *  @offset: offset to data location
3471  *  @data: pointer to the location for storing the data
3472  *
3473  *  Reads the flash word at offset into data.  Offset is converted
3474  *  to bytes before read.
3475  **/
3476 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3477                                          u16 *data)
3478 {
3479         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3480
3481         if (!data)
3482                 return -E1000_ERR_NVM;
3483
3484         /* Must convert offset into bytes. */
3485         offset <<= 1;
3486
3487         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3488 }
3489
3490 /**
3491  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3492  *  @hw: pointer to the HW structure
3493  *  @offset: The offset of the byte to read.
3494  *  @data: Pointer to a byte to store the value read.
3495  *
3496  *  Reads a single byte from the NVM using the flash access registers.
3497  **/
3498 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3499                                          u8 *data)
3500 {
3501         s32 ret_val;
3502         u16 word = 0;
3503
3504         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3505         if (ret_val)
3506                 return ret_val;
3507
3508         *data = (u8)word;
3509
3510         return E1000_SUCCESS;
3511 }
3512
3513 /**
3514  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3515  *  @hw: pointer to the HW structure
3516  *  @offset: The offset (in bytes) of the byte or word to read.
3517  *  @size: Size of data to read, 1=byte 2=word
3518  *  @data: Pointer to the word to store the value read.
3519  *
3520  *  Reads a byte or word from the NVM using the flash access registers.
3521  **/
3522 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3523                                          u8 size, u16 *data)
3524 {
3525         union ich8_hws_flash_status hsfsts;
3526         union ich8_hws_flash_ctrl hsflctl;
3527         u32 flash_linear_addr;
3528         u32 flash_data = 0;
3529         s32 ret_val = -E1000_ERR_NVM;
3530         u8 count = 0;
3531
3532         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3533
3534         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3535                 return -E1000_ERR_NVM;
3536
3537         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3538                              hw->nvm.flash_base_addr);
3539
3540         do {
3541                 usec_delay(1);
3542                 /* Steps */
3543                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3544                 if (ret_val != E1000_SUCCESS)
3545                         break;
3546
3547                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3548                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3549                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3550                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3551                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3552
3553                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3554
3555                 ret_val =
3556                     e1000_flash_cycle_ich8lan(hw,
3557                                               ICH_FLASH_READ_COMMAND_TIMEOUT);
3558
3559                 /* If FCERR is set to 1, clear it and retry the whole
3560                  * sequence a few more times; otherwise read the result
3561                  * in from Flash Data0, which returns the data least
3562                  * significant byte first.
3563                  */
3564                 if (ret_val == E1000_SUCCESS) {
3565                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3566                         if (size == 1)
3567                                 *data = (u8)(flash_data & 0x000000FF);
3568                         else if (size == 2)
3569                                 *data = (u16)(flash_data & 0x0000FFFF);
3570                         break;
3571                 } else {
3572                         /* If we've gotten here, then things are probably
3573                          * completely hosed, but if the error condition is
3574                          * detected, it won't hurt to give it another try...
3575                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3576                          */
3577                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3578                                                               ICH_FLASH_HSFSTS);
3579                         if (hsfsts.hsf_status.flcerr) {
3580                                 /* Repeat for some time before giving up. */
3581                                 continue;
3582                         } else if (!hsfsts.hsf_status.flcdone) {
3583                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3584                                 break;
3585                         }
3586                 }
3587         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3588
3589         return ret_val;
3590 }
3591
3592 /**
3593  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3594  *  @hw: pointer to the HW structure
3595  *  @offset: The offset (in bytes) of the word(s) to write.
3596  *  @words: Size of data to write in words
3597  *  @data: Pointer to the word(s) to write at offset.
3598  *
3599  *  Writes a byte or word to the NVM using the flash access registers.
3600  **/
3601 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3602                                    u16 *data)
3603 {
3604         struct e1000_nvm_info *nvm = &hw->nvm;
3605         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3606         u16 i;
3607
3608         DEBUGFUNC("e1000_write_nvm_ich8lan");
3609
3610         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3611             (words == 0)) {
3612                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3613                 return -E1000_ERR_NVM;
3614         }
3615
3616         nvm->ops.acquire(hw);
3617
3618         for (i = 0; i < words; i++) {
3619                 dev_spec->shadow_ram[offset+i].modified = true;
3620                 dev_spec->shadow_ram[offset+i].value = data[i];
3621         }
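        /* Writes are only staged in the shadow RAM here; they are committed
         * to the flash by e1000_update_nvm_checksum_ich8lan() below.
         */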
3622
3623         nvm->ops.release(hw);
3624
3625         return E1000_SUCCESS;
3626 }
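/* Illustrative usage only (not part of the driver): the staged-write pattern
 * used by e1000_validate_nvm_checksum_ich8lan() below goes through the NVM
 * ops table and then commits the change, for example:
 *
 *	ret_val = hw->nvm.ops.write(hw, word, 1, &data);
 *	if (!ret_val)
 *		ret_val = hw->nvm.ops.update(hw);
 */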
3627
3628 /**
3629  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3630  *  @hw: pointer to the HW structure
3631  *
3632  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3633  *  which writes the checksum to the shadow ram.  The changes in the shadow
3634  *  ram are then committed to the EEPROM by processing each bank at a time
3635  *  checking for the modified bit and writing only the pending changes.
3636  *  After a successful commit, the shadow ram is cleared and is ready for
3637  *  future writes.
3638  **/
3639 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3640 {
3641         struct e1000_nvm_info *nvm = &hw->nvm;
3642         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3643         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3644         s32 ret_val;
3645         u16 data;
3646
3647         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3648
3649         ret_val = e1000_update_nvm_checksum_generic(hw);
3650         if (ret_val)
3651                 goto out;
3652
3653         if (nvm->type != e1000_nvm_flash_sw)
3654                 goto out;
3655
3656         nvm->ops.acquire(hw);
3657
3658         /* We're writing to the opposite bank so if we're on bank 1,
3659          * write to bank 0 etc.  We also need to erase the segment that
3660          * is going to be written
3661          */
3662         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3663         if (ret_val != E1000_SUCCESS) {
3664                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3665                 bank = 0;
3666         }
3667
3668         if (bank == 0) {
3669                 new_bank_offset = nvm->flash_bank_size;
3670                 old_bank_offset = 0;
3671                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3672                 if (ret_val)
3673                         goto release;
3674         } else {
3675                 old_bank_offset = nvm->flash_bank_size;
3676                 new_bank_offset = 0;
3677                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3678                 if (ret_val)
3679                         goto release;
3680         }
3681
3682         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3683                 /* Determine whether to write the value stored
3684                  * in the other NVM bank or a modified value stored
3685                  * in the shadow RAM
3686                  */
3687                 if (dev_spec->shadow_ram[i].modified) {
3688                         data = dev_spec->shadow_ram[i].value;
3689                 } else {
3690                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
3691                                                                 old_bank_offset,
3692                                                                 &data);
3693                         if (ret_val)
3694                                 break;
3695                 }
3696
3697                 /* If the word is 0x13, then make sure the signature bits
3698                  * (15:14) are 11b until the commit has completed.
3699                  * This will allow us to write 10b which indicates the
3700                  * signature is valid.  We want to do this after the write
3701                  * has completed so that we don't mark the segment valid
3702                  * while the write is still in progress
3703                  */
3704                 if (i == E1000_ICH_NVM_SIG_WORD)
3705                         data |= E1000_ICH_NVM_SIG_MASK;
3706
3707                 /* Convert offset to bytes. */
3708                 act_offset = (i + new_bank_offset) << 1;
3709
3710                 usec_delay(100);
3711                 /* Write the bytes to the new bank. */
3712                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3713                                                                act_offset,
3714                                                                (u8)data);
3715                 if (ret_val)
3716                         break;
3717
3718                 usec_delay(100);
3719                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3720                                                           act_offset + 1,
3721                                                           (u8)(data >> 8));
3722                 if (ret_val)
3723                         break;
3724         }
3725
3726         /* Don't bother writing the segment valid bits if sector
3727          * programming failed.
3728          */
3729         if (ret_val) {
3730                 DEBUGOUT("Flash commit failed.\n");
3731                 goto release;
3732         }
3733
3734         /* Finally, validate the new segment by setting bits 15:14
3735          * to 10b in word 0x13.  This can be done without an erase
3736          * because these bits start out as 11b and only bit 14 needs
3737          * to change to 0b.
3738          */
3739         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3740         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3741         if (ret_val)
3742                 goto release;
3743
3744         data &= 0xBFFF;
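        /* 0xBFFF clears bit 14.  Since bit 14 lives in the high byte of the
         * signature word, rewriting only the high byte below is sufficient
         * to change the signature from 11b to 10b.
         */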
3745         ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3746                                                        act_offset * 2 + 1,
3747                                                        (u8)(data >> 8));
3748         if (ret_val)
3749                 goto release;
3750
3751         /* And invalidate the previously valid segment by clearing the
3752          * high byte of its signature word (0x13).  This can be done
3753          * without an erase because a flash erase sets all bits to 1's,
3754          * and 1's can be written to 0's without an erase.
3755          */
3756         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3757         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3758         if (ret_val)
3759                 goto release;
3760
3761         /* Great!  Everything worked, we can now clear the cached entries. */
3762         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3763                 dev_spec->shadow_ram[i].modified = false;
3764                 dev_spec->shadow_ram[i].value = 0xFFFF;
3765         }
3766
3767 release:
3768         nvm->ops.release(hw);
3769
3770         /* Reload the EEPROM, or else modifications will not appear
3771          * until after the next adapter reset.
3772          */
3773         if (!ret_val) {
3774                 nvm->ops.reload(hw);
3775                 msec_delay(10);
3776         }
3777
3778 out:
3779         if (ret_val)
3780                 DEBUGOUT1("NVM update error: %d\n", ret_val);
3781
3782         return ret_val;
3783 }
3784
3785 /**
3786  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3787  *  @hw: pointer to the HW structure
3788  *
3789  *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3790  *  If the bit is 0, the EEPROM has been modified but the checksum was not
3791  *  calculated, in which case we need to calculate the checksum and set bit 6.
3792  **/
3793 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3794 {
3795         s32 ret_val;
3796         u16 data;
3797         u16 word;
3798         u16 valid_csum_mask;
3799
3800         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3801
3802         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3803          * the checksum needs to be fixed.  This bit is an indication that
3804          * the NVM was prepared by OEM software and did not calculate
3805          * the checksum...a likely scenario.
3806          */
3807         switch (hw->mac.type) {
3808         case e1000_pch_lpt:
3809                 word = NVM_COMPAT;
3810                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3811                 break;
3812         default:
3813                 word = NVM_FUTURE_INIT_WORD1;
3814                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3815                 break;
3816         }
3817
3818         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3819         if (ret_val)
3820                 return ret_val;
3821
3822         if (!(data & valid_csum_mask)) {
3823                 data |= valid_csum_mask;
3824                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3825                 if (ret_val)
3826                         return ret_val;
3827                 ret_val = hw->nvm.ops.update(hw);
3828                 if (ret_val)
3829                         return ret_val;
3830         }
3831
3832         return e1000_validate_nvm_checksum_generic(hw);
3833 }
3834
3835 /**
3836  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3837  *  @hw: pointer to the HW structure
3838  *  @offset: The offset (in bytes) of the byte/word to write.
3839  *  @size: Size of data to write, 1=byte 2=word
3840  *  @data: The byte(s) to write to the NVM.
3841  *
3842  *  Writes one/two bytes to the NVM using the flash access registers.
3843  **/
3844 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3845                                           u8 size, u16 data)
3846 {
3847         union ich8_hws_flash_status hsfsts;
3848         union ich8_hws_flash_ctrl hsflctl;
3849         u32 flash_linear_addr;
3850         u32 flash_data = 0;
3851         s32 ret_val;
3852         u8 count = 0;
3853
3854         DEBUGFUNC("e1000_write_flash_data_ich8lan");
3855
3856         if (size < 1 || size > 2 || data > size * 0xff ||
3857             offset > ICH_FLASH_LINEAR_ADDR_MASK)
3858                 return -E1000_ERR_NVM;
3859
3860         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3861                              hw->nvm.flash_base_addr);
3862
3863         do {
3864                 usec_delay(1);
3865                 /* Steps */
3866                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3867                 if (ret_val != E1000_SUCCESS)
3868                         break;
3869
3870                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3871                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3872                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3873                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3874                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3875
3876                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3877
3878                 if (size == 1)
3879                         flash_data = (u32)data & 0x00FF;
3880                 else
3881                         flash_data = (u32)data;
3882
3883                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3884
3885                 /* If FCERR is set to 1, clear it and try the whole
3886                  * sequence a few more times; otherwise we are done.
3887                  */
3888                 ret_val =
3889                     e1000_flash_cycle_ich8lan(hw,
3890                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3891                 if (ret_val == E1000_SUCCESS)
3892                         break;
3893
3894                 /* If we're here, then things are most likely
3895                  * completely hosed, but if the error condition
3896                  * is detected, it won't hurt to give it another
3897                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3898                  */
3899                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3900                 if (hsfsts.hsf_status.flcerr)
3901                         /* Repeat for some time before giving up. */
3902                         continue;
3903                 if (!hsfsts.hsf_status.flcdone) {
3904                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3905                         break;
3906                 }
3907         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3908
3909         return ret_val;
3910 }
3911
3912 /**
3913  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3914  *  @hw: pointer to the HW structure
3915  *  @offset: The offset of the byte to write.
3916  *  @data: The byte to write to the NVM.
3917  *
3918  *  Writes a single byte to the NVM using the flash access registers.
3919  **/
3920 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3921                                           u8 data)
3922 {
3923         u16 word = (u16)data;
3924
3925         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3926
3927         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3928 }
3929
3930 /**
3931  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3932  *  @hw: pointer to the HW structure
3933  *  @offset: The offset of the byte to write.
3934  *  @byte: The byte to write to the NVM.
3935  *
3936  *  Writes a single byte to the NVM using the flash access registers.
3937  *  Goes through a retry algorithm before giving up.
3938  **/
3939 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3940                                                 u32 offset, u8 byte)
3941 {
3942         s32 ret_val;
3943         u16 program_retries;
3944
3945         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3946
3947         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3948         if (!ret_val)
3949                 return ret_val;
3950
3951         for (program_retries = 0; program_retries < 100; program_retries++) {
3952                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3953                 usec_delay(100);
3954                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3955                 if (ret_val == E1000_SUCCESS)
3956                         break;
3957         }
3958         if (program_retries == 100)
3959                 return -E1000_ERR_NVM;
3960
3961         return E1000_SUCCESS;
3962 }
3963
3964 /**
3965  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3966  *  @hw: pointer to the HW structure
3967  *  @bank: 0 for first bank, 1 for second bank, etc.
3968  *
3969  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3970  *  bank N is 4096 * N + flash_reg_addr.
3971  **/
3972 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3973 {
3974         struct e1000_nvm_info *nvm = &hw->nvm;
3975         union ich8_hws_flash_status hsfsts;
3976         union ich8_hws_flash_ctrl hsflctl;
3977         u32 flash_linear_addr;
3978         /* bank size is in 16bit words - adjust to bytes */
3979         u32 flash_bank_size = nvm->flash_bank_size * 2;
3980         s32 ret_val;
3981         s32 count = 0;
3982         s32 j, iteration, sector_size;
3983
3984         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3985
3986         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3987
3988         /* Determine HW Sector size: Read BERASE bits of hw flash status
3989          * register
3990          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3991          *     consecutive sectors.  The start index for the nth Hw sector
3992          *     can be calculated as = bank * 4096 + n * 256
3993          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3994          *     The start index for the nth Hw sector can be calculated
3995          *     as = bank * 4096
3996          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3997          *     (ich9 only, otherwise error condition)
3998          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3999          */
4000         switch (hsfsts.hsf_status.berasesz) {
4001         case 0:
4002                 /* Hw sector size 256 */
4003                 sector_size = ICH_FLASH_SEG_SIZE_256;
4004                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4005                 break;
4006         case 1:
4007                 sector_size = ICH_FLASH_SEG_SIZE_4K;
4008                 iteration = 1;
4009                 break;
4010         case 2:
4011                 sector_size = ICH_FLASH_SEG_SIZE_8K;
4012                 iteration = 1;
4013                 break;
4014         case 3:
4015                 sector_size = ICH_FLASH_SEG_SIZE_64K;
4016                 iteration = 1;
4017                 break;
4018         default:
4019                 return -E1000_ERR_NVM;
4020         }
4021
4022         /* Start with the base address, then add the sector offset. */
4023         flash_linear_addr = hw->nvm.flash_base_addr;
4024         flash_linear_addr += (bank) ? flash_bank_size : 0;
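        /* Bank 1 begins flash_bank_size bytes after bank 0 in the linear
         * flash address space.
         */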
4025
4026         for (j = 0; j < iteration; j++) {
4027                 do {
4028                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4029
4030                         /* Steps */
4031                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
4032                         if (ret_val)
4033                                 return ret_val;
4034
4035                         /* Write a value 11 (block Erase) in Flash
4036                          * Cycle field in hw flash control
4037                          */
4038                         hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4039                                                               ICH_FLASH_HSFCTL);
4040                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4041                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4042                                                 hsflctl.regval);
4043
4044                         /* Write the last 24 bits of an index within the
4045                          * block into Flash Linear address field in Flash
4046                          * Address.
4047                          */
4048                         flash_linear_addr += (j * sector_size);
4049                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4050                                               flash_linear_addr);
4051
4052                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4053                         if (ret_val == E1000_SUCCESS)
4054                                 break;
4055
4056                         /* Check if FCERR is set to 1.  If 1,
4057                          * clear it and try the whole sequence
4058                          * a few more times; otherwise we are done
4059                          */
4060                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4061                                                       ICH_FLASH_HSFSTS);
4062                         if (hsfsts.hsf_status.flcerr)
4063                                 /* repeat for some time before giving up */
4064                                 continue;
4065                         else if (!hsfsts.hsf_status.flcdone)
4066                                 return ret_val;
4067                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4068         }
4069
4070         return E1000_SUCCESS;
4071 }
4072
4073 /**
4074  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4075  *  @hw: pointer to the HW structure
4076  *  @data: Pointer to the LED settings
4077  *
4078  *  Reads the LED default settings from the NVM to data.  If the NVM LED
4079  *  setting is all 0's or all F's, set the LED default to a valid LED default
4080  *  setting.
4081  **/
4082 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4083 {
4084         s32 ret_val;
4085
4086         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4087
4088         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4089         if (ret_val) {
4090                 DEBUGOUT("NVM Read Error\n");
4091                 return ret_val;
4092         }
4093
4094         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4095                 *data = ID_LED_DEFAULT_ICH8LAN;
4096
4097         return E1000_SUCCESS;
4098 }
4099
4100 /**
4101  *  e1000_id_led_init_pchlan - store LED configurations
4102  *  @hw: pointer to the HW structure
4103  *
4104  *  PCH does not control LEDs via the LEDCTL register, rather it uses
4105  *  the PHY LED configuration register.
4106  *
4107  *  PCH also does not have an "always on" or "always off" mode which
4108  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4109  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4110  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4111  *  link based on logic in e1000_led_[on|off]_pchlan().
4112  **/
4113 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4114 {
4115         struct e1000_mac_info *mac = &hw->mac;
4116         s32 ret_val;
4117         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4118         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4119         u16 data, i, temp, shift;
4120
4121         DEBUGFUNC("e1000_id_led_init_pchlan");
4122
4123         /* Get default ID LED modes */
4124         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4125         if (ret_val)
4126                 return ret_val;
4127
4128         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4129         mac->ledctl_mode1 = mac->ledctl_default;
4130         mac->ledctl_mode2 = mac->ledctl_default;
4131
4132         for (i = 0; i < 4; i++) {
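                /* Each of the four LEDs has a 4-bit default-mode field in the
                 * NVM word (hence the i << 2 extraction), while the PHY LED
                 * configuration uses a 5-bit field per LED (hence shift = i * 5).
                 */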
4133                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4134                 shift = (i * 5);
4135                 switch (temp) {
4136                 case ID_LED_ON1_DEF2:
4137                 case ID_LED_ON1_ON2:
4138                 case ID_LED_ON1_OFF2:
4139                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4140                         mac->ledctl_mode1 |= (ledctl_on << shift);
4141                         break;
4142                 case ID_LED_OFF1_DEF2:
4143                 case ID_LED_OFF1_ON2:
4144                 case ID_LED_OFF1_OFF2:
4145                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4146                         mac->ledctl_mode1 |= (ledctl_off << shift);
4147                         break;
4148                 default:
4149                         /* Do nothing */
4150                         break;
4151                 }
4152                 switch (temp) {
4153                 case ID_LED_DEF1_ON2:
4154                 case ID_LED_ON1_ON2:
4155                 case ID_LED_OFF1_ON2:
4156                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4157                         mac->ledctl_mode2 |= (ledctl_on << shift);
4158                         break;
4159                 case ID_LED_DEF1_OFF2:
4160                 case ID_LED_ON1_OFF2:
4161                 case ID_LED_OFF1_OFF2:
4162                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4163                         mac->ledctl_mode2 |= (ledctl_off << shift);
4164                         break;
4165                 default:
4166                         /* Do nothing */
4167                         break;
4168                 }
4169         }
4170
4171         return E1000_SUCCESS;
4172 }
4173
4174 /**
4175  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4176  *  @hw: pointer to the HW structure
4177  *
4178  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4179  *  register, so the bus width is hard coded.
4180  **/
4181 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4182 {
4183         struct e1000_bus_info *bus = &hw->bus;
4184         s32 ret_val;
4185
4186         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4187
4188         ret_val = e1000_get_bus_info_pcie_generic(hw);
4189
4190         /* ICH devices are "PCI Express"-ish.  They have
4191          * a configuration space, but do not contain
4192          * PCI Express Capability registers, so bus width
4193          * must be hardcoded.
4194          */
4195         if (bus->width == e1000_bus_width_unknown)
4196                 bus->width = e1000_bus_width_pcie_x1;
4197
4198         return ret_val;
4199 }
4200
4201 /**
4202  *  e1000_reset_hw_ich8lan - Reset the hardware
4203  *  @hw: pointer to the HW structure
4204  *
4205  *  Does a full reset of the hardware which includes a reset of the PHY and
4206  *  MAC.
4207  **/
4208 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4209 {
4210         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4211         u16 kum_cfg;
4212         u32 ctrl, reg;
4213         s32 ret_val;
4214
4215         DEBUGFUNC("e1000_reset_hw_ich8lan");
4216
4217         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4218          * on the last TLP read/write transaction when MAC is reset.
4219          */
4220         ret_val = e1000_disable_pcie_master_generic(hw);
4221         if (ret_val)
4222                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4223
4224         DEBUGOUT("Masking off all interrupts\n");
4225         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4226
4227         /* Disable the Transmit and Receive units.  Then delay to allow
4228          * any pending transactions to complete before we hit the MAC
4229          * with the global reset.
4230          */
4231         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4232         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4233         E1000_WRITE_FLUSH(hw);
4234
4235         msec_delay(10);
4236
4237         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4238         if (hw->mac.type == e1000_ich8lan) {
4239                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4240                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4241                 /* Set Packet Buffer Size to 16k. */
4242                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4243         }
4244
4245         if (hw->mac.type == e1000_pchlan) {
4246                 /* Save the NVM K1 bit setting */
4247                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4248                 if (ret_val)
4249                         return ret_val;
4250
4251                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4252                         dev_spec->nvm_k1_enabled = true;
4253                 else
4254                         dev_spec->nvm_k1_enabled = false;
4255         }
4256
4257         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4258
4259         if (!hw->phy.ops.check_reset_block(hw)) {
4260                 /* Full-chip reset requires MAC and PHY reset at the same
4261                  * time to make sure the interface between MAC and the
4262                  * external PHY is reset.
4263                  */
4264                 ctrl |= E1000_CTRL_PHY_RST;
4265
4266                 /* Gate automatic PHY configuration by hardware on
4267                  * non-managed 82579
4268                  */
4269                 if ((hw->mac.type == e1000_pch2lan) &&
4270                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4271                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4272         }
4273         ret_val = e1000_acquire_swflag_ich8lan(hw);
4274         DEBUGOUT("Issuing a global reset to ich8lan\n");
4275         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4276         /* cannot issue a flush here because it hangs the hardware */
4277         msec_delay(20);
4278
4279         /* Set Phy Config Counter to 50msec */
4280         if (hw->mac.type == e1000_pch2lan) {
4281                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4282                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4283                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4284                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4285         }
4286
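        /* Release the software flag mutex only if the acquire above succeeded. */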
4287         if (!ret_val)
4288                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4289
4290         if (ctrl & E1000_CTRL_PHY_RST) {
4291                 ret_val = hw->phy.ops.get_cfg_done(hw);
4292                 if (ret_val)
4293                         return ret_val;
4294
4295                 ret_val = e1000_post_phy_reset_ich8lan(hw);
4296                 if (ret_val)
4297                         return ret_val;
4298         }
4299
4300         /* For PCH, this write will make sure that any noise
4301          * will be detected as a CRC error and be dropped rather than show up
4302          * as a bad packet to the DMA engine.
4303          */
4304         if (hw->mac.type == e1000_pchlan)
4305                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4306
4307         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4308         E1000_READ_REG(hw, E1000_ICR);
4309
4310         reg = E1000_READ_REG(hw, E1000_KABGTXD);
4311         reg |= E1000_KABGTXD_BGSQLBIAS;
4312         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4313
4314         return E1000_SUCCESS;
4315 }
4316
4317 /**
4318  *  e1000_init_hw_ich8lan - Initialize the hardware
4319  *  @hw: pointer to the HW structure
4320  *
4321  *  Prepares the hardware for transmit and receive by doing the following:
4322  *   - initialize hardware bits
4323  *   - initialize LED identification
4324  *   - setup receive address registers
4325  *   - setup flow control
4326  *   - setup transmit descriptors
4327  *   - clear statistics
4328  **/
4329 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4330 {
4331         struct e1000_mac_info *mac = &hw->mac;
4332         u32 ctrl_ext, txdctl, snoop;
4333         s32 ret_val;
4334         u16 i;
4335
4336         DEBUGFUNC("e1000_init_hw_ich8lan");
4337
4338         e1000_initialize_hw_bits_ich8lan(hw);
4339
4340         /* Initialize identification LED */
4341         ret_val = mac->ops.id_led_init(hw);
4342         /* An error is not fatal and we should not stop init due to this */
4343         if (ret_val)
4344                 DEBUGOUT("Error initializing identification LED\n");
4345
4346         /* Setup the receive address. */
4347         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4348
4349         /* Zero out the Multicast HASH table */
4350         DEBUGOUT("Zeroing the MTA\n");
4351         for (i = 0; i < mac->mta_reg_count; i++)
4352                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4353
4354         /* The 82578 Rx buffer will stall if wakeup is enabled in host and
4355          * the ME.  Disable wakeup by clearing the host wakeup bit.
4356          * Reset the phy after disabling host wakeup to reset the Rx buffer.
4357          */
4358         if (hw->phy.type == e1000_phy_82578) {
4359                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4360                 i &= ~BM_WUC_HOST_WU_BIT;
4361                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4362                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4363                 if (ret_val)
4364                         return ret_val;
4365         }
4366
4367         /* Setup link and flow control */
4368         ret_val = mac->ops.setup_link(hw);
4369
4370         /* Set the transmit descriptor write-back policy for both queues */
4371         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4372         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4373                   E1000_TXDCTL_FULL_TX_DESC_WB);
4374         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4375                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4376         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4377         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4378         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4379                   E1000_TXDCTL_FULL_TX_DESC_WB);
4380         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4381                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4382         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4383
4384         /* ICH8 has opposite polarity of no_snoop bits.
4385          * By default, we should use snoop behavior.
4386          */
4387         if (mac->type == e1000_ich8lan)
4388                 snoop = PCIE_ICH8_SNOOP_ALL;
4389         else
4390                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4391         e1000_set_pcie_no_snoop_generic(hw, snoop);
4392
4393         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4394         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4395         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4396
4397         /* Clear all of the statistics registers (clear on read).  It is
4398          * important that we do this after we have tried to establish link
4399          * because the symbol error count will increment wildly if there
4400          * is no link.
4401          */
4402         e1000_clear_hw_cntrs_ich8lan(hw);
4403
4404         return ret_val;
4405 }
4406
4407 /**
4408  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4409  *  @hw: pointer to the HW structure
4410  *
4411  *  Sets/Clears required hardware bits necessary for correctly setting up the
4412  *  hardware for transmit and receive.
4413  **/
4414 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4415 {
4416         u32 reg;
4417
4418         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4419
4420         /* Extended Device Control */
4421         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4422         reg |= (1 << 22);
4423         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4424         if (hw->mac.type >= e1000_pchlan)
4425                 reg |= E1000_CTRL_EXT_PHYPDEN;
4426         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4427
4428         /* Transmit Descriptor Control 0 */
4429         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4430         reg |= (1 << 22);
4431         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4432
4433         /* Transmit Descriptor Control 1 */
4434         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4435         reg |= (1 << 22);
4436         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4437
4438         /* Transmit Arbitration Control 0 */
4439         reg = E1000_READ_REG(hw, E1000_TARC(0));
4440         if (hw->mac.type == e1000_ich8lan)
4441                 reg |= (1 << 28) | (1 << 29);
4442         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4443         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4444
4445         /* Transmit Arbitration Control 1 */
4446         reg = E1000_READ_REG(hw, E1000_TARC(1));
4447         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4448                 reg &= ~(1 << 28);
4449         else
4450                 reg |= (1 << 28);
4451         reg |= (1 << 24) | (1 << 26) | (1 << 30);
4452         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4453
4454         /* Device Status */
4455         if (hw->mac.type == e1000_ich8lan) {
4456                 reg = E1000_READ_REG(hw, E1000_STATUS);
4457                 reg &= ~(1 << 31);
4458                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4459         }
4460
4461         /* Work around a descriptor data corruption issue seen with NFS v2 UDP
4462          * traffic by simply disabling the NFS filtering capability
4463          */
4464         reg = E1000_READ_REG(hw, E1000_RFCTL);
4465         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4466
4467         /* Disable IPv6 extension header parsing because some malformed
4468          * IPv6 headers can hang the Rx.
4469          */
4470         if (hw->mac.type == e1000_ich8lan)
4471                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4472         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4473
4474         /* Enable ECC on Lynxpoint */
4475         if (hw->mac.type == e1000_pch_lpt) {
4476                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4477                 reg |= E1000_PBECCSTS_ECC_ENABLE;
4478                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4479
4480                 reg = E1000_READ_REG(hw, E1000_CTRL);
4481                 reg |= E1000_CTRL_MEHE;
4482                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4483         }
4484
4485         return;
4486 }
4487
4488 /**
4489  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4490  *  @hw: pointer to the HW structure
4491  *
4492  *  Determines which flow control settings to use, then configures flow
4493  *  control.  Calls the appropriate media-specific link configuration
4494  *  function.  Assuming the adapter has a valid link partner, a valid link
4495  *  should be established.  Assumes the hardware has previously been reset
4496  *  and the transmitter and receiver are not enabled.
4497  **/
4498 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4499 {
4500         s32 ret_val;
4501
4502         DEBUGFUNC("e1000_setup_link_ich8lan");
4503
4504         if (hw->phy.ops.check_reset_block(hw))
4505                 return E1000_SUCCESS;
4506
4507         /* ICH parts do not have a word in the NVM to determine
4508          * the default flow control setting, so we explicitly
4509          * set it to full.
4510          */
4511         if (hw->fc.requested_mode == e1000_fc_default)
4512                 hw->fc.requested_mode = e1000_fc_full;
4513
4514         /* Save off the requested flow control mode for use later.  Depending
4515          * on the link partner's capabilities, we may or may not use this mode.
4516          */
4517         hw->fc.current_mode = hw->fc.requested_mode;
4518
4519         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4520                 hw->fc.current_mode);
4521
4522         /* Continue to configure the copper link. */
4523         ret_val = hw->mac.ops.setup_physical_interface(hw);
4524         if (ret_val)
4525                 return ret_val;
4526
4527         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4528         if ((hw->phy.type == e1000_phy_82578) ||
4529             (hw->phy.type == e1000_phy_82579) ||
4530             (hw->phy.type == e1000_phy_i217) ||
4531             (hw->phy.type == e1000_phy_82577)) {
4532                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4533
4534                 ret_val = hw->phy.ops.write_reg(hw,
4535                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4536                                              hw->fc.pause_time);
4537                 if (ret_val)
4538                         return ret_val;
4539         }
4540
4541         return e1000_set_fc_watermarks_generic(hw);
4542 }
4543
4544 /**
4545  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4546  *  @hw: pointer to the HW structure
4547  *
4548  *  Configures the Kumeran interface to the PHY to wait the appropriate time
4549  *  when polling the PHY, then calls the generic setup_copper_link to finish
4550  *  configuring the copper link.
4551  **/
4552 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4553 {
4554         u32 ctrl;
4555         s32 ret_val;
4556         u16 reg_data;
4557
4558         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4559
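        /* Force the link up at the MAC (SLU) and clear the force-speed and
         * force-duplex bits so speed/duplex come from auto-negotiation.
         */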
4560         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4561         ctrl |= E1000_CTRL_SLU;
4562         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4563         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4564
4565         /* Set the mac to wait the maximum time between each iteration
4566          * and increase the max iterations when polling the phy;
4567          * this fixes erroneous timeouts at 10Mbps.
4568          */
4569         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4570                                                0xFFFF);
4571         if (ret_val)
4572                 return ret_val;
4573         ret_val = e1000_read_kmrn_reg_generic(hw,
4574                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
4575                                               &reg_data);
4576         if (ret_val)
4577                 return ret_val;
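        /* Per the note above, OR-ing 0x3F into the Kumeran in-band parameter
         * is what increases the PHY polling iterations.
         */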
4578         reg_data |= 0x3F;
4579         ret_val = e1000_write_kmrn_reg_generic(hw,
4580                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
4581                                                reg_data);
4582         if (ret_val)
4583                 return ret_val;
4584
4585         switch (hw->phy.type) {
4586         case e1000_phy_igp_3:
4587                 ret_val = e1000_copper_link_setup_igp(hw);
4588                 if (ret_val)
4589                         return ret_val;
4590                 break;
4591         case e1000_phy_bm:
4592         case e1000_phy_82578:
4593                 ret_val = e1000_copper_link_setup_m88(hw);
4594                 if (ret_val)
4595                         return ret_val;
4596                 break;
4597         case e1000_phy_82577:
4598         case e1000_phy_82579:
4599                 ret_val = e1000_copper_link_setup_82577(hw);
4600                 if (ret_val)
4601                         return ret_val;
4602                 break;
4603         case e1000_phy_ife:
4604                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4605                                                &reg_data);
4606                 if (ret_val)
4607                         return ret_val;
4608
4609                 reg_data &= ~IFE_PMC_AUTO_MDIX;
4610
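                /* hw->phy.mdix selects the crossover mode: 1 forces MDI,
                 * 2 forces MDI-X, 0 (or anything else) enables auto-MDI-X.
                 */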
4611                 switch (hw->phy.mdix) {
4612                 case 1:
4613                         reg_data &= ~IFE_PMC_FORCE_MDIX;
4614                         break;
4615                 case 2:
4616                         reg_data |= IFE_PMC_FORCE_MDIX;
4617                         break;
4618                 case 0:
4619                 default:
4620                         reg_data |= IFE_PMC_AUTO_MDIX;
4621                         break;
4622                 }
4623                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4624                                                 reg_data);
4625                 if (ret_val)
4626                         return ret_val;
4627                 break;
4628         default:
4629                 break;
4630         }
4631
4632         return e1000_setup_copper_link_generic(hw);
4633 }
4634
4635 /**
4636  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4637  *  @hw: pointer to the HW structure
4638  *
4639  *  Calls the PHY specific link setup function and then calls the
4640  *  generic setup_copper_link to finish configuring the link for
4641  *  Lynxpoint PCH devices
4642  **/
4643 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4644 {
4645         u32 ctrl;
4646         s32 ret_val;
4647
4648         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4649
4650         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4651         ctrl |= E1000_CTRL_SLU;
4652         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4653         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4654
4655         ret_val = e1000_copper_link_setup_82577(hw);
4656         if (ret_val)
4657                 return ret_val;
4658
4659         return e1000_setup_copper_link_generic(hw);
4660 }
4661
4662 /**
4663  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4664  *  @hw: pointer to the HW structure
4665  *  @speed: pointer to store current link speed
4666  *  @duplex: pointer to store the current link duplex
4667  *
4668  *  Calls the generic get_speed_and_duplex to retrieve the current link
4669  *  information and then calls the Kumeran lock loss workaround for links at
4670  *  gigabit speeds.
4671  **/
4672 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4673                                           u16 *duplex)
4674 {
4675         s32 ret_val;
4676
4677         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4678
4679         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4680         if (ret_val)
4681                 return ret_val;
4682
4683         if ((hw->mac.type == e1000_ich8lan) &&
4684             (hw->phy.type == e1000_phy_igp_3) &&
4685             (*speed == SPEED_1000)) {
4686                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4687         }
4688
4689         return ret_val;
4690 }
4691
4692 /**
4693  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4694  *  @hw: pointer to the HW structure
4695  *
4696  *  Work-around for 82566 Kumeran PCS lock loss:
4697  *  On link status change (i.e. PCI reset, speed change) and link is up and
4698  *  speed is gigabit-
4699  *    0) if workaround is optionally disabled do nothing
4700  *    1) wait 1ms for Kumeran link to come up
4701  *    2) check Kumeran Diagnostic register PCS lock loss bit
4702  *    3) if not set the link is locked (all is good), otherwise...
4703  *    4) reset the PHY
4704  *    5) repeat up to 10 times
4705  *  Note: this is only called for IGP3 copper when speed is 1gb.
4706  **/
4707 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4708 {
4709         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4710         u32 phy_ctrl;
4711         s32 ret_val;
4712         u16 i, data;
4713         bool link;
4714
4715         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4716
4717         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4718                 return E1000_SUCCESS;
4719
4720         /* Make sure link is up before proceeding.  If not, just return.
4721          * Attempting this while the link is negotiating has been seen to
4722          * foul up link stability.
4723          */
4724         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4725         if (!link)
4726                 return E1000_SUCCESS;
4727
4728         for (i = 0; i < 10; i++) {
4729                 /* read once to clear */
4730                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4731                 if (ret_val)
4732                         return ret_val;
4733                 /* and again to get new status */
4734                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4735                 if (ret_val)
4736                         return ret_val;
4737
4738                 /* check for PCS lock */
4739                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4740                         return E1000_SUCCESS;
4741
4742                 /* Issue PHY reset */
4743                 hw->phy.ops.reset(hw);
4744                 msec_delay_irq(5);
4745         }
4746         /* Disable GigE link negotiation */
4747         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4748         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4749                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4750         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4751
4752         /* Call gig speed drop workaround on Gig disable before accessing
4753          * any PHY registers
4754          */
4755         e1000_gig_downshift_workaround_ich8lan(hw);
4756
4757         /* unable to acquire PCS lock */
4758         return -E1000_ERR_PHY;
4759 }
4760
4761 /**
4762  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4763  *  @hw: pointer to the HW structure
4764  *  @state: boolean value used to set the current Kumeran workaround state
4765  *
4766  *  If ICH8, set the current Kumeran workaround state (enabled - true
4767  *  /disabled - false).
4768  **/
4769 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4770                                                  bool state)
4771 {
4772         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4773
4774         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4775
4776         if (hw->mac.type != e1000_ich8lan) {
4777                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4778                 return;
4779         }
4780
4781         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4782
4783         return;
4784 }
4785
4786 /**
4787  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4788  *  @hw: pointer to the HW structure
4789  *
4790  *  Workaround for 82566 power-down on D3 entry:
4791  *    1) disable gigabit link
4792  *    2) write VR power-down enable
4793  *    3) read it back
4794  *  Continue if successful, else issue LCD reset and repeat
4795  **/
4796 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4797 {
4798         u32 reg;
4799         u16 data;
4800         u8  retry = 0;
4801
4802         DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4803
4804         if (hw->phy.type != e1000_phy_igp_3)
4805                 return;
4806
4807         /* Try the workaround twice (if needed) */
4808         do {
4809                 /* Disable link */
4810                 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4811                 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4812                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4813                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4814
4815                 /* Call gig speed drop workaround on Gig disable before
4816                  * accessing any PHY registers
4817                  */
4818                 if (hw->mac.type == e1000_ich8lan)
4819                         e1000_gig_downshift_workaround_ich8lan(hw);
4820
4821                 /* Write VR power-down enable */
4822                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4823                 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4824                 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4825                                       data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4826
4827                 /* Read it back and test */
4828                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4829                 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4830                 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4831                         break;
4832
4833                 /* Issue PHY reset and repeat at most one more time */
4834                 reg = E1000_READ_REG(hw, E1000_CTRL);
4835                 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4836                 retry++;
4837         } while (retry);
4838 }
4839
4840 /**
4841  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4842  *  @hw: pointer to the HW structure
4843  *
4844  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4845  *  LPLU, Gig disable, MDIC PHY reset):
4846  *    1) Set Kumeran Near-end loopback
4847  *    2) Clear Kumeran Near-end loopback
4848  *  Should only be called for ICH8[m] devices with any 1G Phy.
4849  **/
4850 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4851 {
4852         s32 ret_val;
4853         u16 reg_data;
4854
4855         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4856
4857         if ((hw->mac.type != e1000_ich8lan) ||
4858             (hw->phy.type == e1000_phy_ife))
4859                 return;
4860
4861         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4862                                               &reg_data);
4863         if (ret_val)
4864                 return;
4865         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4866         ret_val = e1000_write_kmrn_reg_generic(hw,
4867                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
4868                                                reg_data);
4869         if (ret_val)
4870                 return;
4871         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4872         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4873                                      reg_data);
4874 }
4875
4876 /**
4877  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4878  *  @hw: pointer to the HW structure
4879  *
4880  *  During S0 to Sx transition, it is possible the link remains at gig
4881  *  instead of negotiating to a lower speed.  Before going to Sx, set
4882  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4883  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4884  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4885  *  needs to be written.
4886  *  Parts that support (and are linked to a partner which supports) EEE in
4887  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4888  *  than 10Mbps w/o EEE.
4889  **/
4890 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4891 {
4892         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4893         u32 phy_ctrl;
4894         s32 ret_val;
4895
4896         DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4897
4898         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4899         phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4900
4901         if (hw->phy.type == e1000_phy_i217) {
4902                 u16 phy_reg, device_id = hw->device_id;
4903
4904 #ifdef NAHUM6_WPT_HW
4905                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4906                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
4907                     (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
4908                     (device_id == E1000_DEV_ID_PCH_I218_V3)) {
4909                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4910
4911                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4912                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4913                 }
4914 #else
4915                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4916                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
4917                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4918
4919                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4920                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4921                 }
4922 #endif
4923
4924                 ret_val = hw->phy.ops.acquire(hw);
4925                 if (ret_val)
4926                         goto out;
4927
4928                 if (!dev_spec->eee_disable) {
4929                         u16 eee_advert;
4930
4931                         ret_val =
4932                             e1000_read_emi_reg_locked(hw,
4933                                                       I217_EEE_ADVERTISEMENT,
4934                                                       &eee_advert);
4935                         if (ret_val)
4936                                 goto release;
4937
4938                         /* Disable LPLU if both link partners support 100BaseT
4939                          * EEE and 100Full is advertised on both ends of the
4940                          * link, and enable Auto Enable LPI since there will
4941                          * be no driver to enable LPI while in Sx.
4942                          */
4943                         if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4944                             (dev_spec->eee_lp_ability &
4945                              I82579_EEE_100_SUPPORTED) &&
4946                             (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4947                                 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4948                                               E1000_PHY_CTRL_NOND0A_LPLU);
4949
4950                                 /* Set Auto Enable LPI after link up */
4951                                 hw->phy.ops.read_reg_locked(hw,
4952                                                             I217_LPI_GPIO_CTRL,
4953                                                             &phy_reg);
4954                                 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4955                                 hw->phy.ops.write_reg_locked(hw,
4956                                                              I217_LPI_GPIO_CTRL,
4957                                                              phy_reg);
4958                         }
4959                 }
4960
4961                 /* For i217 Intel Rapid Start Technology support,
4962                  * when the system is going into Sx and no manageability engine
4963                  * is present, the driver must configure proxy to reset only on
4964                  * power good.  LPI (Low Power Idle) state must also reset only
4965                  * on power good, as well as the MTA (Multicast table array).
4966                  * The SMBus release must also be disabled on LCD reset.
4967                  */
4968                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4969                         E1000_ICH_FWSM_FW_VALID)) {
4970                         /* Enable proxy to reset only on power good. */
4971                         hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4972                                                     &phy_reg);
4973                         phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4974                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4975                                                      phy_reg);
4976
4977                         /* Set the LPI (EEE) enable bit to reset only on
4978                          * power good.
4979                          */
4980                         hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4981                         phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4982                         hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4983
4984                         /* Disable the SMB release on LCD reset. */
4985                         hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4986                         phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4987                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4988                 }
4989
4990                 /* Enable MTA to reset for Intel Rapid Start Technology
4991                  * Support
4992                  */
4993                 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4994                 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4995                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4996
4997 release:
4998                 hw->phy.ops.release(hw);
4999         }
5000 out:
5001         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5002
5003         if (hw->mac.type == e1000_ich8lan)
5004                 e1000_gig_downshift_workaround_ich8lan(hw);
5005
5006         if (hw->mac.type >= e1000_pchlan) {
5007                 e1000_oem_bits_config_ich8lan(hw, false);
5008
5009                 /* Reset PHY to activate OEM bits on 82577/8 */
5010                 if (hw->mac.type == e1000_pchlan)
5011                         e1000_phy_hw_reset_generic(hw);
5012
5013                 ret_val = hw->phy.ops.acquire(hw);
5014                 if (ret_val)
5015                         return;
5016                 e1000_write_smbus_addr(hw);
5017                 hw->phy.ops.release(hw);
5018         }
5019
5020         return;
5021 }
5022
5023 /**
5024  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5025  *  @hw: pointer to the HW structure
5026  *
5027  *  During Sx to S0 transitions on non-managed devices or managed devices
5028  *  on which PHY resets are not blocked, if the PHY registers cannot be
5029  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5030  *  the PHY.
5031  *  On i217, setup Intel Rapid Start Technology.
5032  **/
5033 void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5034 {
5035         s32 ret_val;
5036
5037         DEBUGFUNC("e1000_resume_workarounds_pchlan");
5038
5039         if (hw->mac.type < e1000_pch2lan)
5040                 return;
5041
5042         ret_val = e1000_init_phy_workarounds_pchlan(hw);
5043         if (ret_val) {
5044                 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5045                 return;
5046         }
5047
5048         /* For i217 Intel Rapid Start Technology support when the system
5049          * is transitioning from Sx and no manageability engine is present
5050          * configure SMBus to restore on reset, disable proxy, and enable
5051          * the reset on MTA (Multicast table array).
5052          */
5053         if (hw->phy.type == e1000_phy_i217) {
5054                 u16 phy_reg;
5055
5056                 ret_val = hw->phy.ops.acquire(hw);
5057                 if (ret_val) {
5058                         DEBUGOUT("Failed to setup iRST\n");
5059                         return;
5060                 }
5061
5062                 /* Clear Auto Enable LPI after link up */
5063                 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5064                 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5065                 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5066
5067                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5068                     E1000_ICH_FWSM_FW_VALID)) {
5069                         /* Restore clear on SMB if no manageability engine
5070                          * is present
5071                          */
5072                         ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5073                                                               &phy_reg);
5074                         if (ret_val)
5075                                 goto release;
5076                         phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5077                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5078
5079                         /* Disable Proxy */
5080                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5081                 }
5082                 /* Enable reset on MTA */
5083                 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5084                                                       &phy_reg);
5085                 if (ret_val)
5086                         goto release;
5087                 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5088                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5089 release:
5090                 if (ret_val)
5091                         DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5092                 hw->phy.ops.release(hw);
5093         }
5094 }
5095
5096 /**
5097  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5098  *  @hw: pointer to the HW structure
5099  *
5100  *  Return the LED back to the default configuration.
5101  **/
5102 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5103 {
5104         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5105
5106         if (hw->phy.type == e1000_phy_ife)
5107                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5108                                              0);
5109
5110         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5111         return E1000_SUCCESS;
5112 }
5113
5114 /**
5115  *  e1000_led_on_ich8lan - Turn LEDs on
5116  *  @hw: pointer to the HW structure
5117  *
5118  *  Turn on the LEDs.
5119  **/
5120 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5121 {
5122         DEBUGFUNC("e1000_led_on_ich8lan");
5123
5124         if (hw->phy.type == e1000_phy_ife)
5125                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5126                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5127
5128         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5129         return E1000_SUCCESS;
5130 }
5131
5132 /**
5133  *  e1000_led_off_ich8lan - Turn LEDs off
5134  *  @hw: pointer to the HW structure
5135  *
5136  *  Turn off the LEDs.
5137  **/
5138 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5139 {
5140         DEBUGFUNC("e1000_led_off_ich8lan");
5141
5142         if (hw->phy.type == e1000_phy_ife)
5143                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5144                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5145
5146         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5147         return E1000_SUCCESS;
5148 }
5149
5150 /**
5151  *  e1000_setup_led_pchlan - Configures SW controllable LED
5152  *  @hw: pointer to the HW structure
5153  *
5154  *  This prepares the SW controllable LED for use.
5155  **/
5156 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5157 {
5158         DEBUGFUNC("e1000_setup_led_pchlan");
5159
5160         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5161                                      (u16)hw->mac.ledctl_mode1);
5162 }
5163
5164 /**
5165  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5166  *  @hw: pointer to the HW structure
5167  *
5168  *  Return the LED back to the default configuration.
5169  **/
5170 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5171 {
5172         DEBUGFUNC("e1000_cleanup_led_pchlan");
5173
5174         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5175                                      (u16)hw->mac.ledctl_default);
5176 }
5177
5178 /**
5179  *  e1000_led_on_pchlan - Turn LEDs on
5180  *  @hw: pointer to the HW structure
5181  *
5182  *  Turn on the LEDs.
5183  **/
5184 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5185 {
5186         u16 data = (u16)hw->mac.ledctl_mode2;
5187         u32 i, led;
5188
5189         DEBUGFUNC("e1000_led_on_pchlan");
5190
5191         /* If no link, then turn LED on by setting the invert bit
5192          * for each LED whose mode is "link_up" in ledctl_mode2.
5193          */
5194         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
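                /* Walk the three 5-bit per-LED mode fields in the LED config. */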
5195                 for (i = 0; i < 3; i++) {
5196                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5197                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5198                             E1000_LEDCTL_MODE_LINK_UP)
5199                                 continue;
5200                         if (led & E1000_PHY_LED0_IVRT)
5201                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5202                         else
5203                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5204                 }
5205         }
5206
5207         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5208 }
5209
5210 /**
5211  *  e1000_led_off_pchlan - Turn LEDs off
5212  *  @hw: pointer to the HW structure
5213  *
5214  *  Turn off the LEDs.
5215  **/
5216 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5217 {
5218         u16 data = (u16)hw->mac.ledctl_mode1;
5219         u32 i, led;
5220
5221         DEBUGFUNC("e1000_led_off_pchlan");
5222
5223         /* If no link, then turn LED off by clearing the invert bit
5224          * for each LED whose mode is "link_up" in ledctl_mode1.
5225          */
5226         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5227                 for (i = 0; i < 3; i++) {
5228                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5229                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5230                             E1000_LEDCTL_MODE_LINK_UP)
5231                                 continue;
5232                         if (led & E1000_PHY_LED0_IVRT)
5233                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5234                         else
5235                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5236                 }
5237         }
5238
5239         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5240 }
5241
5242 /**
5243  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5244  *  @hw: pointer to the HW structure
5245  *
5246  *  Read appropriate register for the config done bit for completion status
5247  *  and configure the PHY through s/w for EEPROM-less parts.
5248  *
5249  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5250  *  config done bit, so only an error is logged and initialization continues.
5251  *  If we were to return an error, EEPROM-less silicon would not be able to be
5252  *  reset or change link.
5253  **/
5254 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5255 {
5256         s32 ret_val = E1000_SUCCESS;
5257         u32 bank = 0;
5258         u32 status;
5259
5260         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5261
5262         e1000_get_cfg_done_generic(hw);
5263
5264         /* Wait for indication from h/w that it has completed basic config */
5265         if (hw->mac.type >= e1000_ich10lan) {
5266                 e1000_lan_init_done_ich8lan(hw);
5267         } else {
5268                 ret_val = e1000_get_auto_rd_done_generic(hw);
5269                 if (ret_val) {
5270                         /* When auto config read does not complete, do not
5271                          * return with an error. This can happen in situations
5272                          * where there is no eeprom and prevents getting link.
5273                          */
5274                         DEBUGOUT("Auto Read Done did not complete\n");
5275                         ret_val = E1000_SUCCESS;
5276                 }
5277         }
5278
5279         /* Clear PHY Reset Asserted bit */
5280         status = E1000_READ_REG(hw, E1000_STATUS);
5281         if (status & E1000_STATUS_PHYRA)
5282                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5283         else
5284                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5285
5286         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5287         if (hw->mac.type <= e1000_ich9lan) {
5288                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5289                     (hw->phy.type == e1000_phy_igp_3)) {
5290                         e1000_phy_init_script_igp3(hw);
5291                 }
5292         } else {
5293                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5294                         /* Maybe we should do a basic PHY config */
5295                         DEBUGOUT("EEPROM not present\n");
5296                         ret_val = -E1000_ERR_CONFIG;
5297                 }
5298         }
5299
5300         return ret_val;
5301 }
5302
5303 /**
5304  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5305  * @hw: pointer to the HW structure
5306  *
5307  * In the case of a PHY power down to save power, to turn off the link during a
5308  * driver unload, or when wake on LAN is not enabled, remove the link.
5309  **/
5310 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5311 {
5312         /* If the management interface is not enabled, then power down */
5313         if (!(hw->mac.ops.check_mng_mode(hw) ||
5314               hw->phy.ops.check_reset_block(hw)))
5315                 e1000_power_down_phy_copper(hw);
5316
5317         return;
5318 }
5319
5320 /**
5321  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5322  *  @hw: pointer to the HW structure
5323  *
5324  *  Clears hardware counters specific to the silicon family and calls
5325  *  clear_hw_cntrs_generic to clear all general purpose counters.
5326  **/
5327 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5328 {
5329         u16 phy_data;
5330         s32 ret_val;
5331
5332         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5333
5334         e1000_clear_hw_cntrs_base_generic(hw);
5335
5336         E1000_READ_REG(hw, E1000_ALGNERRC);
5337         E1000_READ_REG(hw, E1000_RXERRC);
5338         E1000_READ_REG(hw, E1000_TNCRS);
5339         E1000_READ_REG(hw, E1000_CEXTERR);
5340         E1000_READ_REG(hw, E1000_TSCTC);
5341         E1000_READ_REG(hw, E1000_TSCTFC);
5342
5343         E1000_READ_REG(hw, E1000_MGTPRC);
5344         E1000_READ_REG(hw, E1000_MGTPDC);
5345         E1000_READ_REG(hw, E1000_MGTPTC);
5346
5347         E1000_READ_REG(hw, E1000_IAC);
5348         E1000_READ_REG(hw, E1000_ICRXOC);
5349
5350         /* Clear PHY statistics registers */
5351         if ((hw->phy.type == e1000_phy_82578) ||
5352             (hw->phy.type == e1000_phy_82579) ||
5353             (hw->phy.type == e1000_phy_i217) ||
5354             (hw->phy.type == e1000_phy_82577)) {
5355                 ret_val = hw->phy.ops.acquire(hw);
5356                 if (ret_val)
5357                         return;
5358                 ret_val = hw->phy.ops.set_page(hw,
5359                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
5360                 if (ret_val)
5361                         goto release;
5362                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5363                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5364                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5365                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5366                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5367                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5368                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5369                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5370                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5371                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5372                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5373                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5374                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5375                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5376 release:
5377                 hw->phy.ops.release(hw);
5378         }
5379 }
5380