e1000/base: fix K1 configuration
[dpdk.git] drivers/net/e1000/base/e1000_ich8lan.c
1 /*******************************************************************************
2
3 Copyright (c) 2001-2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 /* 82562G 10/100 Network Connection
35  * 82562G-2 10/100 Network Connection
36  * 82562GT 10/100 Network Connection
37  * 82562GT-2 10/100 Network Connection
38  * 82562V 10/100 Network Connection
39  * 82562V-2 10/100 Network Connection
40  * 82566DC-2 Gigabit Network Connection
41  * 82566DC Gigabit Network Connection
42  * 82566DM-2 Gigabit Network Connection
43  * 82566DM Gigabit Network Connection
44  * 82566MC Gigabit Network Connection
45  * 82566MM Gigabit Network Connection
46  * 82567LM Gigabit Network Connection
47  * 82567LF Gigabit Network Connection
48  * 82567V Gigabit Network Connection
49  * 82567LM-2 Gigabit Network Connection
50  * 82567LF-2 Gigabit Network Connection
51  * 82567V-2 Gigabit Network Connection
52  * 82567LF-3 Gigabit Network Connection
53  * 82567LM-3 Gigabit Network Connection
54  * 82567LM-4 Gigabit Network Connection
55  * 82577LM Gigabit Network Connection
56  * 82577LC Gigabit Network Connection
57  * 82578DM Gigabit Network Connection
58  * 82578DC Gigabit Network Connection
59  * 82579LM Gigabit Network Connection
60  * 82579V Gigabit Network Connection
61  * Ethernet Connection I217-LM
62  * Ethernet Connection I217-V
63  * Ethernet Connection I218-V
64  * Ethernet Connection I218-LM
65  * Ethernet Connection (2) I218-LM
66  * Ethernet Connection (2) I218-V
67  * Ethernet Connection (3) I218-LM
68  * Ethernet Connection (3) I218-V
69  */
70
71 #include "e1000_api.h"
72
73 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74 STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85                                               u8 *mc_addr_list,
86                                               u32 mc_addr_count);
87 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88 STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89 STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90 STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91 STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92                                             bool active);
93 STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94                                             bool active);
95 STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96                                    u16 words, u16 *data);
97 STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98                                     u16 words, u16 *data);
99 STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
102                                             u16 *data);
103 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
104 STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
105 STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
106 STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
107 STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
108 STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
109 STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
110 STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
111                                            u16 *speed, u16 *duplex);
112 STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
113 STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
114 STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
115 STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
116 STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
117 STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
118 STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
119 STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
120 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
121 STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
122 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
123 STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
124 STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
125                                           u32 offset, u8 *data);
126 STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
127                                           u8 size, u16 *data);
128 STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
129                                           u32 offset, u16 *data);
130 STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
131                                                  u32 offset, u8 byte);
132 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
133 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
134 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
135 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
136 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
137 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
138
139 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
140 /* Offset 04h HSFSTS */
141 union ich8_hws_flash_status {
142         struct ich8_hsfsts {
143                 u16 flcdone:1; /* bit 0 Flash Cycle Done */
144                 u16 flcerr:1; /* bit 1 Flash Cycle Error */
145                 u16 dael:1; /* bit 2 Direct Access error Log */
146                 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
147                 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
148                 u16 reserved1:2; /* bit 7:6 Reserved */
149                 u16 reserved2:6; /* bit 13:8 Reserved */
150                 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
151                 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
152         } hsf_status;
153         u16 regval;
154 };
155
156 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
157 /* Offset 06h FLCTL */
158 union ich8_hws_flash_ctrl {
159         struct ich8_hsflctl {
160                 u16 flcgo:1;   /* 0 Flash Cycle Go */
161                 u16 flcycle:2;   /* 2:1 Flash Cycle */
162                 u16 reserved:5;   /* 7:3 Reserved  */
163                 u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
164                 u16 flockdn:6;   /* 15:10 Reserved */
165         } hsf_ctrl;
166         u16 regval;
167 };
168
169 /* ICH Flash Region Access Permissions */
170 union ich8_hws_flash_regacc {
171         struct ich8_flracc {
172                 u32 grra:8; /* 0:7 GbE region Read Access */
173                 u32 grwa:8; /* 8:15 GbE region Write Access */
174                 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
175                 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
176         } hsf_flregacc;
177         u16 regval;
178 };
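
/* Illustrative sketch (not part of the driver): these unions are accessed by
 * reading or writing the 16-bit .regval member and then decoding the
 * bitfields.  For example, checking Hardware Sequencing Flash Status might
 * look like the following (E1000_READ_FLASH_REG16 and ICH_FLASH_HSFSTS are
 * the accessor and offset used by the flash helpers in this code base):
 *
 *	union ich8_hws_flash_status hsfsts;
 *
 *	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
 *	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
 *		... previous flash cycle completed without error ...
 */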
179
180 /**
181  *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
182  *  @hw: pointer to the HW structure
183  *
184  *  Test access to the PHY registers by reading the PHY ID registers.  If
185  *  the PHY ID is already known (e.g. resume path) compare it with known ID,
186  *  otherwise assume the read PHY ID is correct if it is valid.
187  *
188  *  Assumes the sw/fw/hw semaphore is already acquired.
189  **/
190 STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
191 {
192         u16 phy_reg = 0;
193         u32 phy_id = 0;
194         s32 ret_val = 0;
195         u16 retry_count;
196         u32 mac_reg = 0;
197
198         for (retry_count = 0; retry_count < 2; retry_count++) {
199                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
200                 if (ret_val || (phy_reg == 0xFFFF))
201                         continue;
202                 phy_id = (u32)(phy_reg << 16);
203
204                 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
205                 if (ret_val || (phy_reg == 0xFFFF)) {
206                         phy_id = 0;
207                         continue;
208                 }
209                 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
210                 break;
211         }
212
213         if (hw->phy.id) {
214                 if (hw->phy.id == phy_id)
215                         goto out;
216         } else if (phy_id) {
217                 hw->phy.id = phy_id;
218                 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
219                 goto out;
220         }
221
222         /* In case the PHY needs to be in mdio slow mode,
223          * set slow mode and try to get the PHY id again.
224          */
225         if (hw->mac.type < e1000_pch_lpt) {
226                 hw->phy.ops.release(hw);
227                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
228                 if (!ret_val)
229                         ret_val = e1000_get_phy_id(hw);
230                 hw->phy.ops.acquire(hw);
231         }
232
233         if (ret_val)
234                 return false;
235 out:
236         if (hw->mac.type == e1000_pch_lpt) {
237                 /* Unforce SMBus mode in PHY */
238                 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
239                 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
240                 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
241
242                 /* Unforce SMBus mode in MAC */
243                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
244                 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
245                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
246         }
247
248         return true;
249 }
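
/* Note (illustrative, hypothetical register values): the 32-bit PHY id
 * checked above is composed as (PHY_ID1 << 16) | (PHY_ID2 & PHY_REVISION_MASK),
 * with the low nibble of PHY_ID2 holding the revision.  For example,
 * PHY_ID1 = 0x0154 and PHY_ID2 = 0x2AA4 would yield hw->phy.id = 0x01542AA0
 * and hw->phy.revision = 4.
 */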
250
251 /**
252  *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
253  *  @hw: pointer to the HW structure
254  *
255  *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
256  *  used to reset the PHY to a quiescent state when necessary.
257  **/
258 STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
259 {
260         u32 mac_reg;
261
262         DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
263
264         /* Set Phy Config Counter to 50msec */
265         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
266         mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
267         mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
268         E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
269
270         /* Toggle LANPHYPC Value bit */
271         mac_reg = E1000_READ_REG(hw, E1000_CTRL);
272         mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
273         mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
274         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
275         E1000_WRITE_FLUSH(hw);
276         usec_delay(10);
277         mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
278         E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
279         E1000_WRITE_FLUSH(hw);
280
281         if (hw->mac.type < e1000_pch_lpt) {
282                 msec_delay(50);
283         } else {
284                 u16 count = 20;
285
286                 do {
287                         msec_delay(5);
288                 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
289                            E1000_CTRL_EXT_LPCD) && count--);
290
291                 msec_delay(30);
292         }
293 }
294
295 /**
296  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
297  *  @hw: pointer to the HW structure
298  *
299  *  Workarounds/flow necessary for PHY initialization during driver load
300  *  and resume paths.
301  **/
302 STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
303 {
304         u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
305         s32 ret_val;
306
307         DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
308
309         /* Gate automatic PHY configuration by hardware on managed and
310          * non-managed 82579 and newer adapters.
311          */
312         e1000_gate_hw_phy_config_ich8lan(hw, true);
313
314 #ifdef ULP_SUPPORT
315         /* It is not possible to be certain of the current state of ULP
316          * so forcibly disable it.
317          */
318         hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
319
320 #endif /* ULP_SUPPORT */
321         ret_val = hw->phy.ops.acquire(hw);
322         if (ret_val) {
323                 DEBUGOUT("Failed to initialize PHY flow\n");
324                 goto out;
325         }
326
327         /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
328          * inaccessible and resetting the PHY is not blocked, toggle the
329          * LANPHYPC Value bit to force the interconnect to PCIe mode.
330          */
331         switch (hw->mac.type) {
332         case e1000_pch_lpt:
333                 if (e1000_phy_is_accessible_pchlan(hw))
334                         break;
335
336                 /* Before toggling LANPHYPC, see if PHY is accessible by
337                  * forcing MAC to SMBus mode first.
338                  */
339                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
340                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
341                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
342
343                 /* Wait 50 milliseconds for MAC to finish any retries
344                  * that it might be trying to perform from previous
345                  * attempts to acknowledge any phy read requests.
346                  */
347                 msec_delay(50);
348
349                 /* fall-through */
350         case e1000_pch2lan:
351                 if (e1000_phy_is_accessible_pchlan(hw))
352                         break;
353
354                 /* fall-through */
355         case e1000_pchlan:
356                 if ((hw->mac.type == e1000_pchlan) &&
357                     (fwsm & E1000_ICH_FWSM_FW_VALID))
358                         break;
359
360                 if (hw->phy.ops.check_reset_block(hw)) {
361                         DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
362                         ret_val = -E1000_ERR_PHY;
363                         break;
364                 }
365
366                 /* Toggle LANPHYPC Value bit */
367                 e1000_toggle_lanphypc_pch_lpt(hw);
368                 if (hw->mac.type >= e1000_pch_lpt) {
369                         if (e1000_phy_is_accessible_pchlan(hw))
370                                 break;
371
372                         /* Toggling LANPHYPC brings the PHY out of SMBus mode
373                          * so ensure that the MAC is also out of SMBus mode
374                          */
375                         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
376                         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
377                         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
378
379                         if (e1000_phy_is_accessible_pchlan(hw))
380                                 break;
381
382                         ret_val = -E1000_ERR_PHY;
383                 }
384                 break;
385         default:
386                 break;
387         }
388
389         hw->phy.ops.release(hw);
390         if (!ret_val) {
391
392                 /* Check to see if able to reset PHY.  Print error if not */
393                 if (hw->phy.ops.check_reset_block(hw)) {
394                         ERROR_REPORT("Reset blocked by ME\n");
395                         goto out;
396                 }
397
398                 /* Reset the PHY before any access to it.  Doing so ensures
399                  * that the PHY is in a known good state before we read/write
400                  * PHY registers.  The generic reset is sufficient here,
401                  * because we haven't determined the PHY type yet.
402                  */
403                 ret_val = e1000_phy_hw_reset_generic(hw);
404                 if (ret_val)
405                         goto out;
406
407                 /* On a successful reset, possibly need to wait for the PHY
408                  * to quiesce to an accessible state before returning control
409                  * to the calling function.  If the PHY does not quiesce, then
410                  * return E1000_BLK_PHY_RESET, as this is the condition that
411                  * the PHY is in.
412                  */
413                 ret_val = hw->phy.ops.check_reset_block(hw);
414                 if (ret_val)
415                         ERROR_REPORT("ME blocked access to PHY after reset\n");
416         }
417
418 out:
419         /* Ungate automatic PHY configuration on non-managed 82579 */
420         if ((hw->mac.type == e1000_pch2lan) &&
421             !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
422                 msec_delay(10);
423                 e1000_gate_hw_phy_config_ich8lan(hw, false);
424         }
425
426         return ret_val;
427 }
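
/* Summary of the recovery ladder implemented above:
 *  - pch_lpt:  try the PHY directly; if inaccessible, force the MAC into
 *              SMBus mode, wait 50 ms and retry before resorting to a
 *              LANPHYPC toggle.
 *  - pch2lan:  try the PHY directly, then toggle LANPHYPC if needed.
 *  - pchlan:   toggle LANPHYPC unless ME firmware owns the PHY
 *              (E1000_ICH_FWSM_FW_VALID) or the reset is blocked.
 * After the ladder, a generic PHY hardware reset is issued (when not blocked
 * by ME) and automatic PHY configuration is ungated on non-managed 82579.
 */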
428
429 /**
430  *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
431  *  @hw: pointer to the HW structure
432  *
433  *  Initialize family-specific PHY parameters and function pointers.
434  **/
435 STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
436 {
437         struct e1000_phy_info *phy = &hw->phy;
438         s32 ret_val;
439
440         DEBUGFUNC("e1000_init_phy_params_pchlan");
441
442         phy->addr               = 1;
443         phy->reset_delay_us     = 100;
444
445         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
446         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
447         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
448         phy->ops.set_page       = e1000_set_page_igp;
449         phy->ops.read_reg       = e1000_read_phy_reg_hv;
450         phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
451         phy->ops.read_reg_page  = e1000_read_phy_reg_page_hv;
452         phy->ops.release        = e1000_release_swflag_ich8lan;
453         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
454         phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
455         phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
456         phy->ops.write_reg      = e1000_write_phy_reg_hv;
457         phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
458         phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
459         phy->ops.power_up       = e1000_power_up_phy_copper;
460         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
461         phy->autoneg_mask       = AUTONEG_ADVERTISE_SPEED_DEFAULT;
462
463         phy->id = e1000_phy_unknown;
464
465         ret_val = e1000_init_phy_workarounds_pchlan(hw);
466         if (ret_val)
467                 return ret_val;
468
469         if (phy->id == e1000_phy_unknown)
470                 switch (hw->mac.type) {
471                 default:
472                         ret_val = e1000_get_phy_id(hw);
473                         if (ret_val)
474                                 return ret_val;
475                         if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
476                                 break;
477                         /* fall-through */
478                 case e1000_pch2lan:
479                 case e1000_pch_lpt:
480                         /* In case the PHY needs to be in mdio slow mode,
481                          * set slow mode and try to get the PHY id again.
482                          */
483                         ret_val = e1000_set_mdio_slow_mode_hv(hw);
484                         if (ret_val)
485                                 return ret_val;
486                         ret_val = e1000_get_phy_id(hw);
487                         if (ret_val)
488                                 return ret_val;
489                         break;
490                 }
491         phy->type = e1000_get_phy_type_from_id(phy->id);
492
493         switch (phy->type) {
494         case e1000_phy_82577:
495         case e1000_phy_82579:
496         case e1000_phy_i217:
497                 phy->ops.check_polarity = e1000_check_polarity_82577;
498                 phy->ops.force_speed_duplex =
499                         e1000_phy_force_speed_duplex_82577;
500                 phy->ops.get_cable_length = e1000_get_cable_length_82577;
501                 phy->ops.get_info = e1000_get_phy_info_82577;
502                 phy->ops.commit = e1000_phy_sw_reset_generic;
503                 break;
504         case e1000_phy_82578:
505                 phy->ops.check_polarity = e1000_check_polarity_m88;
506                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
507                 phy->ops.get_cable_length = e1000_get_cable_length_m88;
508                 phy->ops.get_info = e1000_get_phy_info_m88;
509                 break;
510         default:
511                 ret_val = -E1000_ERR_PHY;
512                 break;
513         }
514
515         return ret_val;
516 }
517
518 /**
519  *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
520  *  @hw: pointer to the HW structure
521  *
522  *  Initialize family-specific PHY parameters and function pointers.
523  **/
524 STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
525 {
526         struct e1000_phy_info *phy = &hw->phy;
527         s32 ret_val;
528         u16 i = 0;
529
530         DEBUGFUNC("e1000_init_phy_params_ich8lan");
531
532         phy->addr               = 1;
533         phy->reset_delay_us     = 100;
534
535         phy->ops.acquire        = e1000_acquire_swflag_ich8lan;
536         phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
537         phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
538         phy->ops.get_cfg_done   = e1000_get_cfg_done_ich8lan;
539         phy->ops.read_reg       = e1000_read_phy_reg_igp;
540         phy->ops.release        = e1000_release_swflag_ich8lan;
541         phy->ops.reset          = e1000_phy_hw_reset_ich8lan;
542         phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
543         phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
544         phy->ops.write_reg      = e1000_write_phy_reg_igp;
545         phy->ops.power_up       = e1000_power_up_phy_copper;
546         phy->ops.power_down     = e1000_power_down_phy_copper_ich8lan;
547
548         /* We may need to do this twice - once for IGP and if that fails,
549          * we'll set BM func pointers and try again
550          */
551         ret_val = e1000_determine_phy_address(hw);
552         if (ret_val) {
553                 phy->ops.write_reg = e1000_write_phy_reg_bm;
554                 phy->ops.read_reg  = e1000_read_phy_reg_bm;
555                 ret_val = e1000_determine_phy_address(hw);
556                 if (ret_val) {
557                         DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
558                         return ret_val;
559                 }
560         }
561
562         phy->id = 0;
563         while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
564                (i++ < 100)) {
565                 msec_delay(1);
566                 ret_val = e1000_get_phy_id(hw);
567                 if (ret_val)
568                         return ret_val;
569         }
570
571         /* Verify phy id */
572         switch (phy->id) {
573         case IGP03E1000_E_PHY_ID:
574                 phy->type = e1000_phy_igp_3;
575                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
576                 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
577                 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
578                 phy->ops.get_info = e1000_get_phy_info_igp;
579                 phy->ops.check_polarity = e1000_check_polarity_igp;
580                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
581                 break;
582         case IFE_E_PHY_ID:
583         case IFE_PLUS_E_PHY_ID:
584         case IFE_C_E_PHY_ID:
585                 phy->type = e1000_phy_ife;
586                 phy->autoneg_mask = E1000_ALL_NOT_GIG;
587                 phy->ops.get_info = e1000_get_phy_info_ife;
588                 phy->ops.check_polarity = e1000_check_polarity_ife;
589                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
590                 break;
591         case BME1000_E_PHY_ID:
592                 phy->type = e1000_phy_bm;
593                 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
594                 phy->ops.read_reg = e1000_read_phy_reg_bm;
595                 phy->ops.write_reg = e1000_write_phy_reg_bm;
596                 phy->ops.commit = e1000_phy_sw_reset_generic;
597                 phy->ops.get_info = e1000_get_phy_info_m88;
598                 phy->ops.check_polarity = e1000_check_polarity_m88;
599                 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
600                 break;
601         default:
602                 return -E1000_ERR_PHY;
603                 break;
604         }
605
606         return E1000_SUCCESS;
607 }
608
609 /**
610  *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
611  *  @hw: pointer to the HW structure
612  *
613  *  Initialize family-specific NVM parameters and function
614  *  pointers.
615  **/
616 STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
617 {
618         struct e1000_nvm_info *nvm = &hw->nvm;
619         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
620         u32 gfpreg, sector_base_addr, sector_end_addr;
621         u16 i;
622
623         DEBUGFUNC("e1000_init_nvm_params_ich8lan");
624
625         /* Can't read flash registers if the register set isn't mapped. */
626         nvm->type = e1000_nvm_flash_sw;
627         if (!hw->flash_address) {
628                 DEBUGOUT("ERROR: Flash registers not mapped\n");
629                 return -E1000_ERR_CONFIG;
630         }
631
632         gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
633
634         /* sector_X_addr is a "sector"-aligned address (4096 bytes)
635          * Add 1 to sector_end_addr since this sector is included in
636          * the overall size.
637          */
638         sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
639         sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
640
641         /* flash_base_addr is byte-aligned */
642         nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
643
644         /* find total size of the NVM, then cut in half since the total
645          * size represents two separate NVM banks.
646          */
647         nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
648                                 << FLASH_SECTOR_ADDR_SHIFT);
649         nvm->flash_bank_size /= 2;
650         /* Adjust to word count */
651         nvm->flash_bank_size /= sizeof(u16);
652
653         nvm->word_size = E1000_SHADOW_RAM_WORDS;
654
655         /* Clear shadow ram */
656         for (i = 0; i < nvm->word_size; i++) {
657                 dev_spec->shadow_ram[i].modified = false;
658                 dev_spec->shadow_ram[i].value    = 0xFFFF;
659         }
660
661         E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
662         E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
663
664         /* Function Pointers */
665         nvm->ops.acquire        = e1000_acquire_nvm_ich8lan;
666         nvm->ops.release        = e1000_release_nvm_ich8lan;
667         nvm->ops.read           = e1000_read_nvm_ich8lan;
668         nvm->ops.update         = e1000_update_nvm_checksum_ich8lan;
669         nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
670         nvm->ops.validate       = e1000_validate_nvm_checksum_ich8lan;
671         nvm->ops.write          = e1000_write_nvm_ich8lan;
672
673         return E1000_SUCCESS;
674 }
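
/* Worked example (hypothetical GFPREG contents, 4096-byte sectors as noted
 * above): with a base field of 0x00 and a limit field of 0x0F,
 * sector_base_addr = 0 and sector_end_addr = 16, so the GbE flash region
 * spans 16 * 4096 = 64 KiB.  Split across the two NVM banks, this gives a
 * flash_bank_size of 32 KiB, i.e. 16384 16-bit words once divided by
 * sizeof(u16).
 */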
675
676 /**
677  *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
678  *  @hw: pointer to the HW structure
679  *
680  *  Initialize family-specific MAC parameters and function
681  *  pointers.
682  **/
683 STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
684 {
685         struct e1000_mac_info *mac = &hw->mac;
686 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
687         u16 pci_cfg;
688 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
689
690         DEBUGFUNC("e1000_init_mac_params_ich8lan");
691
692         /* Set media type function pointer */
693         hw->phy.media_type = e1000_media_type_copper;
694
695         /* Set mta register count */
696         mac->mta_reg_count = 32;
697         /* Set rar entry count */
698         mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
699         if (mac->type == e1000_ich8lan)
700                 mac->rar_entry_count--;
701         /* Set if part includes ASF firmware */
702         mac->asf_firmware_present = true;
703         /* FWSM register */
704         mac->has_fwsm = true;
705         /* ARC subsystem not supported */
706         mac->arc_subsystem_valid = false;
707         /* Adaptive IFS supported */
708         mac->adaptive_ifs = true;
709
710         /* Function pointers */
711
712         /* bus type/speed/width */
713         mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
714         /* function id */
715         mac->ops.set_lan_id = e1000_set_lan_id_single_port;
716         /* reset */
717         mac->ops.reset_hw = e1000_reset_hw_ich8lan;
718         /* hw initialization */
719         mac->ops.init_hw = e1000_init_hw_ich8lan;
720         /* link setup */
721         mac->ops.setup_link = e1000_setup_link_ich8lan;
722         /* physical interface setup */
723         mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
724         /* check for link */
725         mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
726         /* link info */
727         mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
728         /* multicast address update */
729         mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
730         /* clear hardware counters */
731         mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
732
733         /* LED and other operations */
734         switch (mac->type) {
735         case e1000_ich8lan:
736         case e1000_ich9lan:
737         case e1000_ich10lan:
738                 /* check management mode */
739                 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
740                 /* ID LED init */
741                 mac->ops.id_led_init = e1000_id_led_init_generic;
742                 /* blink LED */
743                 mac->ops.blink_led = e1000_blink_led_generic;
744                 /* setup LED */
745                 mac->ops.setup_led = e1000_setup_led_generic;
746                 /* cleanup LED */
747                 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
748                 /* turn on/off LED */
749                 mac->ops.led_on = e1000_led_on_ich8lan;
750                 mac->ops.led_off = e1000_led_off_ich8lan;
751                 break;
752         case e1000_pch2lan:
753                 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
754                 mac->ops.rar_set = e1000_rar_set_pch2lan;
755                 /* fall-through */
756         case e1000_pch_lpt:
757 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
758                 /* multicast address update for pch2 */
759                 mac->ops.update_mc_addr_list =
760                         e1000_update_mc_addr_list_pch2lan;
761                 /* fall-through */
762 #endif
763         case e1000_pchlan:
764 #if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
765                 /* save PCH revision_id */
766                 e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
767                 hw->revision_id = (u8)(pci_cfg &= 0x000F);
768 #endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
769                 /* check management mode */
770                 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
771                 /* ID LED init */
772                 mac->ops.id_led_init = e1000_id_led_init_pchlan;
773                 /* setup LED */
774                 mac->ops.setup_led = e1000_setup_led_pchlan;
775                 /* cleanup LED */
776                 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
777                 /* turn on/off LED */
778                 mac->ops.led_on = e1000_led_on_pchlan;
779                 mac->ops.led_off = e1000_led_off_pchlan;
780                 break;
781         default:
782                 break;
783         }
784
785         if (mac->type == e1000_pch_lpt) {
786                 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
787                 mac->ops.rar_set = e1000_rar_set_pch_lpt;
788                 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
789         }
790
791         /* Enable PCS Lock-loss workaround for ICH8 */
792         if (mac->type == e1000_ich8lan)
793                 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
794
795         return E1000_SUCCESS;
796 }
797
798 /**
799  *  __e1000_access_emi_reg_locked - Read/write EMI register
800  *  @hw: pointer to the HW structure
801  *  @address: EMI address to program
802  *  @data: pointer to value to read/write from/to the EMI address
803  *  @read: boolean flag to indicate read or write
804  *
805  *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
806  **/
807 STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
808                                          u16 *data, bool read)
809 {
810         s32 ret_val;
811
812         DEBUGFUNC("__e1000_access_emi_reg_locked");
813
814         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
815         if (ret_val)
816                 return ret_val;
817
818         if (read)
819                 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
820                                                       data);
821         else
822                 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
823                                                        *data);
824
825         return ret_val;
826 }
827
828 /**
829  *  e1000_read_emi_reg_locked - Read Extended Management Interface register
830  *  @hw: pointer to the HW structure
831  *  @addr: EMI address to program
832  *  @data: value to be read from the EMI address
833  *
834  *  Assumes the SW/FW/HW Semaphore is already acquired.
835  **/
836 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
837 {
838         DEBUGFUNC("e1000_read_emi_reg_locked");
839
840         return __e1000_access_emi_reg_locked(hw, addr, data, true);
841 }
842
843 /**
844  *  e1000_write_emi_reg_locked - Write Extended Management Interface register
845  *  @hw: pointer to the HW structure
846  *  @addr: EMI address to program
847  *  @data: value to be written to the EMI address
848  *
849  *  Assumes the SW/FW/HW Semaphore is already acquired.
850  **/
851 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
852 {
853         DEBUGFUNC("e1000_write_emi_reg_locked");
854
855         return __e1000_access_emi_reg_locked(hw, addr, &data, false);
856 }
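
/* Illustrative usage sketch (not part of the driver): both EMI accessors
 * assume the SW/FW/HW semaphore is already held, so a caller typically
 * brackets them with acquire/release, as e1000_set_eee_pchlan() does below:
 *
 *	u16 val;
 *	s32 ret_val = hw->phy.ops.acquire(hw);
 *	if (!ret_val) {
 *		ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_PCS_STATUS,
 *						    &val);
 *		hw->phy.ops.release(hw);
 *	}
 */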
857
858 /**
859  *  e1000_set_eee_pchlan - Enable/disable EEE support
860  *  @hw: pointer to the HW structure
861  *
862  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
863  *  the link and the EEE capabilities of the link partner.  The LPI Control
864  *  register bits will remain set only if/when link is up.
865  *
866  *  EEE LPI must not be asserted earlier than one second after link is up.
867  *  On 82579, EEE LPI should not be enabled until such time otherwise there
868  *  can be link issues with some switches.  Other devices can have EEE LPI
869  *  enabled immediately upon link up since they have a timer in hardware which
870  *  prevents LPI from being asserted too early.
871  **/
872 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
873 {
874         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
875         s32 ret_val;
876         u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
877
878         DEBUGFUNC("e1000_set_eee_pchlan");
879
880         switch (hw->phy.type) {
881         case e1000_phy_82579:
882                 lpa = I82579_EEE_LP_ABILITY;
883                 pcs_status = I82579_EEE_PCS_STATUS;
884                 adv_addr = I82579_EEE_ADVERTISEMENT;
885                 break;
886         case e1000_phy_i217:
887                 lpa = I217_EEE_LP_ABILITY;
888                 pcs_status = I217_EEE_PCS_STATUS;
889                 adv_addr = I217_EEE_ADVERTISEMENT;
890                 break;
891         default:
892                 return E1000_SUCCESS;
893         }
894
895         ret_val = hw->phy.ops.acquire(hw);
896         if (ret_val)
897                 return ret_val;
898
899         ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
900         if (ret_val)
901                 goto release;
902
903         /* Clear bits that enable EEE in various speeds */
904         lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
905
906         /* Enable EEE if not disabled by user */
907         if (!dev_spec->eee_disable) {
908                 /* Save off link partner's EEE ability */
909                 ret_val = e1000_read_emi_reg_locked(hw, lpa,
910                                                     &dev_spec->eee_lp_ability);
911                 if (ret_val)
912                         goto release;
913
914                 /* Read EEE advertisement */
915                 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
916                 if (ret_val)
917                         goto release;
918
919                 /* Enable EEE only for speeds in which the link partner is
920                  * EEE capable and for which we advertise EEE.
921                  */
922                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
923                         lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
924
925                 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
926                         hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
927                         if (data & NWAY_LPAR_100TX_FD_CAPS)
928                                 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
929                         else
930                                 /* EEE is not supported in 100Half, so ignore
931                                  * partner's EEE in 100 ability if full-duplex
932                                  * is not advertised.
933                                  */
934                                 dev_spec->eee_lp_ability &=
935                                     ~I82579_EEE_100_SUPPORTED;
936                 }
937         }
938
939         if (hw->phy.type == e1000_phy_82579) {
940                 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
941                                                     &data);
942                 if (ret_val)
943                         goto release;
944
945                 data &= ~I82579_LPI_100_PLL_SHUT;
946                 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
947                                                      data);
948         }
949
950         /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
951         ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
952         if (ret_val)
953                 goto release;
954
955         ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
956 release:
957         hw->phy.ops.release(hw);
958
959         return ret_val;
960 }
961
962 /**
963  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
964  *  @hw:   pointer to the HW structure
965  *  @link: link up bool flag
966  *
967  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
968  *  preventing further DMA write requests.  Workaround the issue by disabling
969  *  the de-assertion of the clock request when in 1Gbps mode.
970  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
971  *  speeds in order to avoid Tx hangs.
972  **/
973 STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
974 {
975         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
976         u32 status = E1000_READ_REG(hw, E1000_STATUS);
977         s32 ret_val = E1000_SUCCESS;
978         u16 reg;
979
980         if (link && (status & E1000_STATUS_SPEED_1000)) {
981                 ret_val = hw->phy.ops.acquire(hw);
982                 if (ret_val)
983                         return ret_val;
984
985                 ret_val =
986                     e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
987                                                &reg);
988                 if (ret_val)
989                         goto release;
990
991                 ret_val =
992                     e1000_write_kmrn_reg_locked(hw,
993                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
994                                                 reg &
995                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
996                 if (ret_val)
997                         goto release;
998
999                 usec_delay(10);
1000
1001                 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1002                                 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1003
1004                 ret_val =
1005                     e1000_write_kmrn_reg_locked(hw,
1006                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
1007                                                 reg);
1008 release:
1009                 hw->phy.ops.release(hw);
1010         } else {
1011                 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1012                 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1013
1014                 if (!link || ((status & E1000_STATUS_SPEED_100) &&
1015                               (status & E1000_STATUS_FD)))
1016                         goto update_fextnvm6;
1017
1018                 ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1019                 if (ret_val)
1020                         return ret_val;
1021
1022                 /* Clear link status transmit timeout */
1023                 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1024
1025                 if (status & E1000_STATUS_SPEED_100) {
1026                         /* Set inband Tx timeout to 5x10us for 100Half */
1027                         reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1028
1029                         /* Do not extend the K1 entry latency for 100Half */
1030                         fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1031                 } else {
1032                         /* Set inband Tx timeout to 50x10us for 10Full/Half */
1033                         reg |= 50 <<
1034                                I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1035
1036                         /* Extend the K1 entry latency for 10 Mbps */
1037                         fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1038                 }
1039
1040                 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1041                 if (ret_val)
1042                         return ret_val;
1043
1044 update_fextnvm6:
1045                 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1046         }
1047
1048         return ret_val;
1049 }
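
/* Summary of the K1 handling above: at 1 Gbps, K1 is temporarily disabled via
 * the Kumeran K1_CONFIG register while FEXTNVM6.REQ_PLL_CLK is set, after
 * which the original K1 configuration is restored.  On link down the PLL
 * clock request is simply cleared; at 10/100 speeds it is cleared and, except
 * for 100 Mbps full-duplex, the inband link-status Tx timeout and the K1
 * entry latency are retuned (5 x 10 us with no extended latency for 100Half,
 * 50 x 10 us with extended latency for 10 Mbps).
 */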
1050
1051 #ifdef ULP_SUPPORT
1052 /**
1053  *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1054  *  @hw: pointer to the HW structure
1055  *  @to_sx: boolean indicating a system power state transition to Sx
1056  *
1057  *  When link is down, configure ULP mode to significantly reduce the power
1058  *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1059  *  ME firmware to start the ULP configuration.  If not on an ME enabled
1060  *  system, configure the ULP mode by software.
1061  */
1062 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1063 {
1064         u32 mac_reg;
1065         s32 ret_val = E1000_SUCCESS;
1066         u16 phy_reg;
1067
1068         if ((hw->mac.type < e1000_pch_lpt) ||
1069             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1070             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1071             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1072             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1073             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1074                 return 0;
1075
1076         if (!to_sx) {
1077                 int i = 0;
1078                 /* Poll up to 5 seconds for Cable Disconnected indication */
1079                 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1080                          E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1081                         /* Bail if link is re-acquired */
1082                         if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1083                                 return -E1000_ERR_PHY;
1084                         if (i++ == 100)
1085                                 break;
1086
1087                         msec_delay(50);
1088                 }
1089                 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1090                           (E1000_READ_REG(hw, E1000_FEXT) &
1091                            E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1092                           i * 50);
1093         }
1094
1095         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1096                 /* Request ME configure ULP mode in the PHY */
1097                 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1098                 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1099                 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1100
1101                 goto out;
1102         }
1103
1104         ret_val = hw->phy.ops.acquire(hw);
1105         if (ret_val)
1106                 goto out;
1107
1108         /* During S0 Idle keep the phy in PCI-E mode */
1109         if (hw->dev_spec.ich8lan.smbus_disable)
1110                 goto skip_smbus;
1111
1112         /* Force SMBus mode in PHY */
1113         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1114         if (ret_val)
1115                 goto release;
1116         phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1117         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1118
1119         /* Force SMBus mode in MAC */
1120         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1121         mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1122         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1123
1124 skip_smbus:
1125         if (!to_sx) {
1126                 /* Change the 'Link Status Change' interrupt to trigger
1127                  * on 'Cable Status Change'
1128                  */
1129                 ret_val = e1000_read_kmrn_reg_locked(hw,
1130                                                      E1000_KMRNCTRLSTA_OP_MODES,
1131                                                      &phy_reg);
1132                 if (ret_val)
1133                         goto release;
1134                 phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1135                 e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1136                                             phy_reg);
1137         }
1138
1139         /* Set Inband ULP Exit, Reset to SMBus mode and
1140          * Disable SMBus Release on PERST# in PHY
1141          */
1142         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1143         if (ret_val)
1144                 goto release;
1145         phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1146                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1147         if (to_sx) {
1148                 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1149                         phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1150
1151                 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1152         } else {
1153                 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1154         }
1155         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1156
1157         /* Set Disable SMBus Release on PERST# in MAC */
1158         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1159         mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1160         E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1161
1162         /* Commit ULP changes in PHY by starting auto ULP configuration */
1163         phy_reg |= I218_ULP_CONFIG1_START;
1164         e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1165
1166         if (!to_sx) {
1167                 /* Disable Tx so that the MAC doesn't send any (buffered)
1168                  * packets to the PHY.
1169                  */
1170                 mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1171                 mac_reg &= ~E1000_TCTL_EN;
1172                 E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1173         }
1174 release:
1175         hw->phy.ops.release(hw);
1176 out:
1177         if (ret_val)
1178                 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1179         else
1180                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1181
1182         return ret_val;
1183 }
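
/* Illustrative call pattern (hypothetical, not part of this file): per the
 * function comments, a driver would typically invoke e1000_enable_ulp_lpt_lp()
 * from its link-down or suspend path and pair it with
 * e1000_disable_ulp_lpt_lp() when link is re-acquired or on resume, e.g.:
 *
 *	if (!link_up)
 *		e1000_enable_ulp_lpt_lp(hw, to_sx);
 *	else
 *		e1000_disable_ulp_lpt_lp(hw, false);
 *
 * (link_up and to_sx are hypothetical caller-side variables.)
 */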
1184
1185 /**
1186  *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1187  *  @hw: pointer to the HW structure
1188  *  @force: boolean indicating whether or not to force disabling ULP
1189  *
1190  *  Un-configure ULP mode when link is up, the system is transitioned from
1191  *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1192  *  system, poll for an indication from ME that ULP has been un-configured.
1193  *  If not on an ME enabled system, un-configure the ULP mode by software.
1194  *
1195  *  During nominal operation, this function is called when link is acquired
1196  *  to disable ULP mode (force=false); otherwise, for example when unloading
1197  *  the driver or during Sx->S0 transitions, this is called with force=true
1198  *  to forcibly disable ULP.
1199  *
1200  *  When the cable is plugged in while the device is in D0, a Cable Status
1201  *  Change interrupt is generated which causes this function to be called
1202  *  to partially disable ULP mode and restart autonegotiation.  This function
1203  *  is then called again due to the resulting Link Status Change interrupt
1204  *  to finish cleaning up after the ULP flow.
1205  */
1206 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1207 {
1208         s32 ret_val = E1000_SUCCESS;
1209         u32 mac_reg;
1210         u16 phy_reg;
1211         int i = 0;
1212
1213         if ((hw->mac.type < e1000_pch_lpt) ||
1214             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1215             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1216             (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1217             (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1218             (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1219                 return 0;
1220
1221         if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1222                 if (force) {
1223                         /* Request ME un-configure ULP mode in the PHY */
1224                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1225                         mac_reg &= ~E1000_H2ME_ULP;
1226                         mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1227                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1228                 }
1229
1230                 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1231                 while (E1000_READ_REG(hw, E1000_FWSM) &
1232                        E1000_FWSM_ULP_CFG_DONE) {
1233                         if (i++ == 10) {
1234                                 ret_val = -E1000_ERR_PHY;
1235                                 goto out;
1236                         }
1237
1238                         msec_delay(10);
1239                 }
1240                 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1241
1242                 if (force) {
1243                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1244                         mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1245                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1246                 } else {
1247                         /* Clear H2ME.ULP after ME ULP configuration */
1248                         mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1249                         mac_reg &= ~E1000_H2ME_ULP;
1250                         E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1251
1252                         /* Restore link speed advertisements and restart
1253                          * Auto-negotiation
1254                          */
1255                         if (hw->mac.autoneg) {
1256                                 ret_val = e1000_phy_setup_autoneg(hw);
1257                                 if (ret_val)
1258                                         goto out;
1259                         } else {
1260                                 ret_val = e1000_setup_copper_link_generic(hw);
1261                                 if (ret_val)
1262                                         goto out;
1263                         }
1264                         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1265                 }
1266
1267                 goto out;
1268         }
1269
1270         ret_val = hw->phy.ops.acquire(hw);
1271         if (ret_val)
1272                 goto out;
1273
1274         /* Revert the change to the 'Link Status Change'
1275          * interrupt to trigger on 'Cable Status Change'
1276          */
1277         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1278                                              &phy_reg);
1279         if (ret_val)
1280                 goto release;
1281         phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1282         e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1283
1284         if (force)
1285                 /* Toggle LANPHYPC Value bit */
1286                 e1000_toggle_lanphypc_pch_lpt(hw);
1287
1288         /* Unforce SMBus mode in PHY */
1289         ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1290         if (ret_val) {
1291                 /* The MAC might be in PCIe mode, so temporarily force to
1292                  * SMBus mode in order to access the PHY.
1293                  */
1294                 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1295                 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1296                 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1297
1298                 msec_delay(50);
1299
1300                 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1301                                                        &phy_reg);
1302                 if (ret_val)
1303                         goto release;
1304         }
1305         phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1306         e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1307
1308         /* Unforce SMBus mode in MAC */
1309         mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1310         mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1311         E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1312
1313         /* When ULP mode was previously entered, K1 was disabled by the
1314          * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1315          */
1316         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1317         if (ret_val)
1318                 goto release;
1319         phy_reg |= HV_PM_CTRL_K1_ENABLE;
1320         e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1321
1322         /* Clear ULP enabled configuration */
1323         ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1324         if (ret_val)
1325                 goto release;
1326         /* CSC interrupt received due to ULP Indication */
1327         if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1328                 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1329                              I218_ULP_CONFIG1_STICKY_ULP |
1330                              I218_ULP_CONFIG1_RESET_TO_SMBUS |
1331                              I218_ULP_CONFIG1_WOL_HOST |
1332                              I218_ULP_CONFIG1_INBAND_EXIT |
1333                              I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1334                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1335
1336                 /* Commit ULP changes by starting auto ULP configuration */
1337                 phy_reg |= I218_ULP_CONFIG1_START;
1338                 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1339
1340                 /* Clear Disable SMBus Release on PERST# in MAC */
1341                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1342                 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1343                 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1344
1345                 if (!force) {
1346                         hw->phy.ops.release(hw);
1347
1348                         if (hw->mac.autoneg)
1349                                 e1000_phy_setup_autoneg(hw);
1350
1351                         e1000_sw_lcd_config_ich8lan(hw);
1352
1353                         e1000_oem_bits_config_ich8lan(hw, true);
1354
1355                         /* Set ULP state to unknown and return non-zero to
1356                          * indicate no link (yet) and re-enter on the next LSC
1357                          * to finish disabling ULP flow.
1358                          */
1359                         hw->dev_spec.ich8lan.ulp_state =
1360                             e1000_ulp_state_unknown;
1361
1362                         return 1;
1363                 }
1364         }
1365
1366         /* Re-enable Tx */
1367         mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1368         mac_reg |= E1000_TCTL_EN;
1369         E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1370
1371 release:
1372         hw->phy.ops.release(hw);
1373         if (force) {
1374                 hw->phy.ops.reset(hw);
1375                 msec_delay(50);
1376         }
1377 out:
1378         if (ret_val)
1379                 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1380         else
1381                 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1382
1383         return ret_val;
1384 }
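/* Usage sketch (illustrative only, not part of the driver): a link-service
 * path that owns the hw structure would call this helper once the MAC
 * reports link and interpret the return value as the code above defines it:
 * 0 means ULP is fully disabled, 1 means the flow must be re-entered on the
 * next LSC interrupt, and a negative value is an error code.
 *
 *     ret_val = e1000_disable_ulp_lpt_lp(hw, false);
 *     if (ret_val > 0)
 *             DEBUGOUT("ULP disable pending; waiting for next LSC\n");
 *     else if (ret_val < 0)
 *             DEBUGOUT1("ULP disable failed: %d\n", ret_val);
 */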
1385
1386 #endif /* ULP_SUPPORT */
1387 /**
1388  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1389  *  @hw: pointer to the HW structure
1390  *
1391  *  Checks to see if the link status of the hardware has changed.  If a
1392  *  change in link status has been detected, then we read the PHY registers
1393  *  to get the current speed/duplex if link exists.
1394  **/
1395 STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1396 {
1397         struct e1000_mac_info *mac = &hw->mac;
1398         s32 ret_val, tipg_reg = 0;
1399         u16 emi_addr, emi_val = 0;
1400         bool link = false;
1401         u16 phy_reg;
1402
1403         DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1404
1405         /* We only want to go out to the PHY registers to see if Auto-Neg
1406          * has completed and/or if our link status has changed.  The
1407          * get_link_status flag is set upon receiving a Link Status
1408          * Change or Rx Sequence Error interrupt.
1409          */
1410         if (!mac->get_link_status)
1411                 return E1000_SUCCESS;
1412
1413         if ((hw->mac.type < e1000_pch_lpt) ||
1414             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1415             (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1416                 /* First we want to see if the MII Status Register reports
1417                  * link.  If so, then we want to get the current speed/duplex
1418                  * of the PHY.
1419                  */
1420                 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1421                 if (ret_val)
1422                         return ret_val;
1423         } else {
1424                 /* Check the MAC's STATUS register to determine link state
1425                  * since the PHY could be inaccessible while in ULP mode.
1426                  */
1427                 link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1428                 if (link)
1429                         ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1430                 else
1431                         ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1432
1433                 if (ret_val)
1434                         return ret_val;
1435         }
1436
1437         if (hw->mac.type == e1000_pchlan) {
1438                 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1439                 if (ret_val)
1440                         return ret_val;
1441         }
1442
1443         /* When connected at 10Mbps half-duplex, some parts are excessively
1444          * aggressive resulting in many collisions. To avoid this, increase
1445          * the IPG and reduce Rx latency in the PHY.
1446          */
1447         if (((hw->mac.type == e1000_pch2lan) ||
1448              (hw->mac.type == e1000_pch_lpt)) && link) {
1449                 u16 speed, duplex;
1450
1451                 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1452                 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1453                 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1454
1455                 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1456                         tipg_reg |= 0xFF;
1457                         /* Reduce Rx latency in analog PHY */
1458                         emi_val = 0;
1459                 } else {
1460                         /* Roll back the default values */
1461                         tipg_reg |= 0x08;
1462                         emi_val = 1;
1463                 }
1464
1465                 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1466
1467                 ret_val = hw->phy.ops.acquire(hw);
1468                 if (ret_val)
1469                         return ret_val;
1470
1471                 if (hw->mac.type == e1000_pch2lan)
1472                         emi_addr = I82579_RX_CONFIG;
1473                 else
1474                         emi_addr = I217_RX_CONFIG;
1475                 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1476
1477                 hw->phy.ops.release(hw);
1478
1479                 if (ret_val)
1480                         return ret_val;
1481         }
1482
1483         /* I217 Packet Loss issue:
1484          * ensure that FEXTNVM4 Beacon Duration is set correctly
1485          * on power up.
1486          * Set the Beacon Duration for I217 to 8 usec
1487          */
1488         if (hw->mac.type == e1000_pch_lpt) {
1489                 u32 mac_reg;
1490
1491                 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1492                 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1493                 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1494                 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1495         }
1496
1497         /* Work-around I218 hang issue */
1498         if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1499             (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1500             (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1501             (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1502                 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1503                 if (ret_val)
1504                         return ret_val;
1505         }
1506         /* Clear link partner's EEE ability */
1507         hw->dev_spec.ich8lan.eee_lp_ability = 0;
1508
1509         /* Configure K0s minimum time */
1510         if (hw->mac.type == e1000_pch_lpt) {
1511                 e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
1512         }
1513
1514         if (!link)
1515                 return E1000_SUCCESS; /* No link detected */
1516
1517         mac->get_link_status = false;
1518
1519         switch (hw->mac.type) {
1520         case e1000_pch2lan:
1521                 ret_val = e1000_k1_workaround_lv(hw);
1522                 if (ret_val)
1523                         return ret_val;
1524                 /* fall-thru */
1525         case e1000_pchlan:
1526                 if (hw->phy.type == e1000_phy_82578) {
1527                         ret_val = e1000_link_stall_workaround_hv(hw);
1528                         if (ret_val)
1529                                 return ret_val;
1530                 }
1531
1532                 /* Workaround for PCHx parts in half-duplex:
1533                  * Set the number of preambles removed from the packet
1534                  * when it is passed from the PHY to the MAC to prevent
1535                  * the MAC from misinterpreting the packet type.
1536                  */
1537                 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1538                 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1539
1540                 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1541                     E1000_STATUS_FD)
1542                         phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1543
1544                 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1545                 break;
1546         default:
1547                 break;
1548         }
1549
1550         /* Check if there was DownShift; this must be checked
1551          * immediately after link-up
1552          */
1553         e1000_check_downshift_generic(hw);
1554
1555         /* Enable/Disable EEE after link up */
1556         if (hw->phy.type > e1000_phy_82579) {
1557                 ret_val = e1000_set_eee_pchlan(hw);
1558                 if (ret_val)
1559                         return ret_val;
1560         }
1561
1562         /* If we are forcing speed/duplex, then we simply return since
1563          * we have already determined whether we have link or not.
1564          */
1565         if (!mac->autoneg)
1566                 return -E1000_ERR_CONFIG;
1567
1568         /* Auto-Neg is enabled.  Auto Speed Detection takes care
1569          * of MAC speed/duplex configuration.  So we only need to
1570          * configure Collision Distance in the MAC.
1571          */
1572         mac->ops.config_collision_dist(hw);
1573
1574         /* Configure Flow Control now that Auto-Neg has completed.
1575          * First, we need to restore the desired flow control
1576          * settings because we may have had to re-autoneg with a
1577          * different link partner.
1578          */
1579         ret_val = e1000_config_fc_after_link_up_generic(hw);
1580         if (ret_val)
1581                 DEBUGOUT("Error configuring flow control\n");
1582
1583         return ret_val;
1584 }
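/* Illustrative sketch (assumption, the wiring is not shown in this file):
 * this routine is normally reached through the MAC ops table from the Link
 * Status Change interrupt path, which sets get_link_status first:
 *
 *     hw->mac.get_link_status = true;
 *     ret_val = hw->mac.ops.check_for_link(hw);
 *
 * where mac.ops.check_for_link is assumed to point at
 * e1000_check_for_copper_link_ich8lan for this family.
 */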
1585
1586 /**
1587  *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1588  *  @hw: pointer to the HW structure
1589  *
1590  *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1591  **/
1592 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1593 {
1594         DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1595
1596         hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1597         hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1598         switch (hw->mac.type) {
1599         case e1000_ich8lan:
1600         case e1000_ich9lan:
1601         case e1000_ich10lan:
1602                 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1603                 break;
1604         case e1000_pchlan:
1605         case e1000_pch2lan:
1606         case e1000_pch_lpt:
1607                 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1608                 break;
1609         default:
1610                 break;
1611         }
1612 }
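/* Usage sketch (illustrative only): a caller that owns the e1000_hw
 * structure is expected to install the entry points and then run the
 * init_params hooks set above, e.g.
 *
 *     e1000_init_function_pointers_ich8lan(hw);
 *     ret_val = hw->mac.ops.init_params(hw);
 *     if (!ret_val)
 *             ret_val = hw->nvm.ops.init_params(hw);
 *     if (!ret_val)
 *             ret_val = hw->phy.ops.init_params(hw);
 */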
1613
1614 /**
1615  *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1616  *  @hw: pointer to the HW structure
1617  *
1618  *  Acquires the mutex for performing NVM operations.
1619  **/
1620 STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1621 {
1622         DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1623
1624         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1625
1626         return E1000_SUCCESS;
1627 }
1628
1629 /**
1630  *  e1000_release_nvm_ich8lan - Release NVM mutex
1631  *  @hw: pointer to the HW structure
1632  *
1633  *  Releases the mutex used while performing NVM operations.
1634  **/
1635 STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1636 {
1637         DEBUGFUNC("e1000_release_nvm_ich8lan");
1638
1639         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1640
1641         return;
1642 }
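/* Conceptual sketch (illustrative only): the two helpers above simply
 * bracket NVM traffic with dev_spec.ich8lan.nvm_mutex.  The NVM read and
 * update routines of this family are assumed to reach them through the ops
 * table:
 *
 *     hw->nvm.ops.acquire(hw);    assumed to map to e1000_acquire_nvm_ich8lan
 *     ... flash / NVM register accesses ...
 *     hw->nvm.ops.release(hw);    assumed to map to e1000_release_nvm_ich8lan
 */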
1643
1644 /**
1645  *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1646  *  @hw: pointer to the HW structure
1647  *
1648  *  Acquires the software control flag for performing PHY and select
1649  *  MAC CSR accesses.
1650  **/
1651 STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1652 {
1653         u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1654         s32 ret_val = E1000_SUCCESS;
1655
1656         DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1657
1658         E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1659
1660         while (timeout) {
1661                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1662                 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1663                         break;
1664
1665                 msec_delay_irq(1);
1666                 timeout--;
1667         }
1668
1669         if (!timeout) {
1670                 DEBUGOUT("SW has already locked the resource.\n");
1671                 ret_val = -E1000_ERR_CONFIG;
1672                 goto out;
1673         }
1674
1675         timeout = SW_FLAG_TIMEOUT;
1676
1677         extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1678         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1679
1680         while (timeout) {
1681                 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1682                 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1683                         break;
1684
1685                 msec_delay_irq(1);
1686                 timeout--;
1687         }
1688
1689         if (!timeout) {
1690                 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1691                           E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1692                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1693                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1694                 ret_val = -E1000_ERR_CONFIG;
1695                 goto out;
1696         }
1697
1698 out:
1699         if (ret_val)
1700                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1701
1702         return ret_val;
1703 }
1704
1705 /**
1706  *  e1000_release_swflag_ich8lan - Release software control flag
1707  *  @hw: pointer to the HW structure
1708  *
1709  *  Releases the software control flag for performing PHY and select
1710  *  MAC CSR accesses.
1711  **/
1712 STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1713 {
1714         u32 extcnf_ctrl;
1715
1716         DEBUGFUNC("e1000_release_swflag_ich8lan");
1717
1718         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1719
1720         if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1721                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1722                 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1723         } else {
1724                 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1725         }
1726
1727         E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1728
1729         return;
1730 }
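/* Usage sketch (illustrative only), mirroring how the SHRA programming code
 * later in this file uses the software control flag:
 *
 *     ret_val = e1000_acquire_swflag_ich8lan(hw);
 *     if (!ret_val) {
 *             ... PHY or protected MAC CSR accesses ...
 *             e1000_release_swflag_ich8lan(hw);
 *     }
 */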
1731
1732 /**
1733  *  e1000_check_mng_mode_ich8lan - Checks management mode
1734  *  @hw: pointer to the HW structure
1735  *
1736  *  This checks if the adapter has any manageability enabled.
1737  *  This is a function pointer entry point only called by read/write
1738  *  routines for the PHY and NVM parts.
1739  **/
1740 STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1741 {
1742         u32 fwsm;
1743
1744         DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1745
1746         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1747
1748         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1749                ((fwsm & E1000_FWSM_MODE_MASK) ==
1750                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1751 }
1752
1753 /**
1754  *  e1000_check_mng_mode_pchlan - Checks management mode
1755  *  @hw: pointer to the HW structure
1756  *
1757  *  This checks if the adapter has iAMT enabled.
1758  *  This is a function pointer entry point only called by read/write
1759  *  routines for the PHY and NVM parts.
1760  **/
1761 STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1762 {
1763         u32 fwsm;
1764
1765         DEBUGFUNC("e1000_check_mng_mode_pchlan");
1766
1767         fwsm = E1000_READ_REG(hw, E1000_FWSM);
1768
1769         return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1770                (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1771 }
1772
1773 /**
1774  *  e1000_rar_set_pch2lan - Set receive address register
1775  *  @hw: pointer to the HW structure
1776  *  @addr: pointer to the receive address
1777  *  @index: receive address array register
1778  *
1779  *  Sets the receive address array register at index to the address passed
1780  *  in by addr.  For 82579, RAR[0] is the base address register that is to
1781  *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1782  *  Use SHRA[0-3] in place of those reserved for ME.
1783  **/
1784 STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1785 {
1786         u32 rar_low, rar_high;
1787
1788         DEBUGFUNC("e1000_rar_set_pch2lan");
1789
1790         /* HW expects these in little endian so we reverse the byte order
1791          * from network order (big endian) to little endian
1792          */
1793         rar_low = ((u32) addr[0] |
1794                    ((u32) addr[1] << 8) |
1795                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1796
1797         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1798
1799         /* If MAC address zero, no need to set the AV bit */
1800         if (rar_low || rar_high)
1801                 rar_high |= E1000_RAH_AV;
1802
1803         if (index == 0) {
1804                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1805                 E1000_WRITE_FLUSH(hw);
1806                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1807                 E1000_WRITE_FLUSH(hw);
1808                 return E1000_SUCCESS;
1809         }
1810
1811         /* RAR[1-6] are owned by manageability.  Skip those and program the
1812          * next address into the SHRA register array.
1813          */
1814         if (index < (u32) (hw->mac.rar_entry_count)) {
1815                 s32 ret_val;
1816
1817                 ret_val = e1000_acquire_swflag_ich8lan(hw);
1818                 if (ret_val)
1819                         goto out;
1820
1821                 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
1822                 E1000_WRITE_FLUSH(hw);
1823                 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
1824                 E1000_WRITE_FLUSH(hw);
1825
1826                 e1000_release_swflag_ich8lan(hw);
1827
1828                 /* verify the register updates */
1829                 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
1830                     (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
1831                         return E1000_SUCCESS;
1832
1833                 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1834                          (index - 1), E1000_READ_REG(hw, E1000_FWSM));
1835         }
1836
1837 out:
1838         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1839         return -E1000_ERR_CONFIG;
1840 }
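/* Worked example (hypothetical address, illustrative only): for a MAC
 * address 00:1B:21:AA:BB:CC the packing above yields
 *
 *     rar_low  = 0xAA211B00;
 *     rar_high = 0x0000CCBB | E1000_RAH_AV;
 *
 * i.e. the first four octets land in RAL/SHRAL in little-endian order and
 * the last two in the low half of RAH/SHRAH.
 */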
1841
1842 /**
1843  *  e1000_rar_set_pch_lpt - Set receive address registers
1844  *  @hw: pointer to the HW structure
1845  *  @addr: pointer to the receive address
1846  *  @index: receive address array register
1847  *
1848  *  Sets the receive address register array at index to the address passed
1849  *  in by addr. For LPT, RAR[0] is the base address register that is to
1850  *  contain the MAC address. SHRA[0-10] are the shared receive address
1851  *  registers that are shared between the Host and manageability engine (ME).
1852  **/
1853 STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1854 {
1855         u32 rar_low, rar_high;
1856         u32 wlock_mac;
1857
1858         DEBUGFUNC("e1000_rar_set_pch_lpt");
1859
1860         /* HW expects these in little endian so we reverse the byte order
1861          * from network order (big endian) to little endian
1862          */
1863         rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
1864                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1865
1866         rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1867
1868         /* If MAC address zero, no need to set the AV bit */
1869         if (rar_low || rar_high)
1870                 rar_high |= E1000_RAH_AV;
1871
1872         if (index == 0) {
1873                 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1874                 E1000_WRITE_FLUSH(hw);
1875                 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1876                 E1000_WRITE_FLUSH(hw);
1877                 return E1000_SUCCESS;
1878         }
1879
1880         /* The manageability engine (ME) can lock certain SHRAR registers that
1881          * it is using - those registers are unavailable for use.
1882          */
1883         if (index < hw->mac.rar_entry_count) {
1884                 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
1885                             E1000_FWSM_WLOCK_MAC_MASK;
1886                 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1887
1888                 /* Check if all SHRAR registers are locked */
1889                 if (wlock_mac == 1)
1890                         goto out;
1891
1892                 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1893                         s32 ret_val;
1894
1895                         ret_val = e1000_acquire_swflag_ich8lan(hw);
1896
1897                         if (ret_val)
1898                                 goto out;
1899
1900                         E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
1901                                         rar_low);
1902                         E1000_WRITE_FLUSH(hw);
1903                         E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
1904                                         rar_high);
1905                         E1000_WRITE_FLUSH(hw);
1906
1907                         e1000_release_swflag_ich8lan(hw);
1908
1909                         /* verify the register updates */
1910                         if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1911                             (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
1912                                 return E1000_SUCCESS;
1913                 }
1914         }
1915
1916 out:
1917         DEBUGOUT1("Failed to write receive address at index %d\n", index);
1918         return -E1000_ERR_CONFIG;
1919 }
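/* Note on FWSM.WLOCK_MAC as used above: a value of 1 means ME has locked
 * every SHRA register, 0 means none are locked, and any other value N
 * leaves indices 1..N writable by the host, e.g.
 *
 *     wlock_mac == 3  ->  e1000_rar_set_pch_lpt(hw, addr, index) may program
 *                         index 1..3, i.e. SHRAL/SHRAH_PCH_LPT(0..2).
 */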
1920
1921 #ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
1922 /**
1923  *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
1924  *  @hw: pointer to the HW structure
1925  *  @mc_addr_list: array of multicast addresses to program
1926  *  @mc_addr_count: number of multicast addresses to program
1927  *
1928  *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
1929  *  The caller must have a packed mc_addr_list of multicast addresses.
1930  **/
1931 STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
1932                                               u8 *mc_addr_list,
1933                                               u32 mc_addr_count)
1934 {
1935         u16 phy_reg = 0;
1936         int i;
1937         s32 ret_val;
1938
1939         DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
1940
1941         e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
1942
1943         ret_val = hw->phy.ops.acquire(hw);
1944         if (ret_val)
1945                 return;
1946
1947         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1948         if (ret_val)
1949                 goto release;
1950
1951         for (i = 0; i < hw->mac.mta_reg_count; i++) {
1952                 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
1953                                            (u16)(hw->mac.mta_shadow[i] &
1954                                                  0xFFFF));
1955                 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
1956                                            (u16)((hw->mac.mta_shadow[i] >> 16) &
1957                                                  0xFFFF));
1958         }
1959
1960         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1961
1962 release:
1963         hw->phy.ops.release(hw);
1964 }
1965
1966 #endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
1967 /**
1968  *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1969  *  @hw: pointer to the HW structure
1970  *
1971  *  Checks if firmware is blocking the reset of the PHY.
1972  *  This is a function pointer entry point only called by
1973  *  reset routines.
1974  **/
1975 STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1976 {
1977         u32 fwsm;
1978         bool blocked = false;
1979         int i = 0;
1980
1981         DEBUGFUNC("e1000_check_reset_block_ich8lan");
1982
1983         do {
1984                 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1985                 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
1986                         blocked = true;
1987                         msec_delay(10);
1988                         continue;
1989                 }
1990                 blocked = false;
1991         } while (blocked && (i++ < 10));
1992         return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
1993 }
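/* Usage sketch (illustrative only): callers are expected to gate PHY resets
 * on this check through the ops table, e.g.
 *
 *     if (!hw->phy.ops.check_reset_block(hw))
 *             hw->phy.ops.reset(hw);
 *     else
 *             DEBUGOUT("PHY reset blocked by firmware\n");
 */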
1994
1995 /**
1996  *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1997  *  @hw: pointer to the HW structure
1998  *
1999  *  Assumes semaphore already acquired.
2000  *
2001  **/
2002 STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2003 {
2004         u16 phy_data;
2005         u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2006         u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2007                 E1000_STRAP_SMT_FREQ_SHIFT;
2008         s32 ret_val;
2009
2010         strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2011
2012         ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2013         if (ret_val)
2014                 return ret_val;
2015
2016         phy_data &= ~HV_SMB_ADDR_MASK;
2017         phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2018         phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2019
2020         if (hw->phy.type == e1000_phy_i217) {
2021                 /* Restore SMBus frequency */
2022                 if (freq--) {
2023                         phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2024                         phy_data |= (freq & (1 << 0)) <<
2025                                 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2026                         phy_data |= (freq & (1 << 1)) <<
2027                                 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2028                 } else {
2029                         DEBUGOUT("Unsupported SMB frequency in PHY\n");
2030                 }
2031         }
2032
2033         return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2034 }
2035
2036 /**
2037  *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2038  *  @hw:   pointer to the HW structure
2039  *
2040  *  SW should configure the LCD from the NVM extended configuration region
2041  *  as a workaround for certain parts.
2042  **/
2043 STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2044 {
2045         struct e1000_phy_info *phy = &hw->phy;
2046         u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2047         s32 ret_val = E1000_SUCCESS;
2048         u16 word_addr, reg_data, reg_addr, phy_page = 0;
2049
2050         DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2051
2052         /* Initialize the PHY from the NVM on ICH platforms.  This
2053          * is needed due to an issue where the NVM configuration is
2054          * not properly autoloaded after power transitions.
2055          * Therefore, after each PHY reset, we will load the
2056          * configuration data out of the NVM manually.
2057          */
2058         switch (hw->mac.type) {
2059         case e1000_ich8lan:
2060                 if (phy->type != e1000_phy_igp_3)
2061                         return ret_val;
2062
2063                 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2064                     (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2065                         sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2066                         break;
2067                 }
2068                 /* Fall-thru */
2069         case e1000_pchlan:
2070         case e1000_pch2lan:
2071         case e1000_pch_lpt:
2072                 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2073                 break;
2074         default:
2075                 return ret_val;
2076         }
2077
2078         ret_val = hw->phy.ops.acquire(hw);
2079         if (ret_val)
2080                 return ret_val;
2081
2082         data = E1000_READ_REG(hw, E1000_FEXTNVM);
2083         if (!(data & sw_cfg_mask))
2084                 goto release;
2085
2086         /* Make sure HW does not configure LCD from PHY
2087          * extended configuration before SW configuration
2088          */
2089         data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2090         if ((hw->mac.type < e1000_pch2lan) &&
2091             (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2092                 goto release;
2093
2094         cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2095         cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2096         cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2097         if (!cnf_size)
2098                 goto release;
2099
2100         cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2101         cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2102
2103         if (((hw->mac.type == e1000_pchlan) &&
2104              !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2105             (hw->mac.type > e1000_pchlan)) {
2106                 /* HW configures the SMBus address and LEDs when the
2107                  * OEM and LCD Write Enable bits are set in the NVM.
2108                  * When both NVM bits are cleared, SW will configure
2109                  * them instead.
2110                  */
2111                 ret_val = e1000_write_smbus_addr(hw);
2112                 if (ret_val)
2113                         goto release;
2114
2115                 data = E1000_READ_REG(hw, E1000_LEDCTL);
2116                 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2117                                                         (u16)data);
2118                 if (ret_val)
2119                         goto release;
2120         }
2121
2122         /* Configure LCD from extended configuration region. */
2123
2124         /* cnf_base_addr is in DWORD */
2125         word_addr = (u16)(cnf_base_addr << 1);
2126
2127         for (i = 0; i < cnf_size; i++) {
2128                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2129                                            &reg_data);
2130                 if (ret_val)
2131                         goto release;
2132
2133                 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2134                                            1, &reg_addr);
2135                 if (ret_val)
2136                         goto release;
2137
2138                 /* Save off the PHY page for future writes. */
2139                 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2140                         phy_page = reg_data;
2141                         continue;
2142                 }
2143
2144                 reg_addr &= PHY_REG_MASK;
2145                 reg_addr |= phy_page;
2146
2147                 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2148                                                     reg_data);
2149                 if (ret_val)
2150                         goto release;
2151         }
2152
2153 release:
2154         hw->phy.ops.release(hw);
2155         return ret_val;
2156 }
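/* Layout note (derived from the loop above): the extended configuration
 * region read from the NVM is a list of word pairs,
 *
 *     word[word_addr + 2*i]     = PHY register data
 *     word[word_addr + 2*i + 1] = PHY register address (a page-select entry
 *                                 only updates phy_page and is not written)
 *
 * which are replayed into the PHY one entry at a time.
 */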
2157
2158 /**
2159  *  e1000_k1_gig_workaround_hv - K1 Si workaround
2160  *  @hw:   pointer to the HW structure
2161  *  @link: link up bool flag
2162  *
2163  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2164  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2165  *  If link is down, the function will restore the default K1 setting located
2166  *  in the NVM.
2167  **/
2168 STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2169 {
2170         s32 ret_val = E1000_SUCCESS;
2171         u16 status_reg = 0;
2172         bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2173
2174         DEBUGFUNC("e1000_k1_gig_workaround_hv");
2175
2176         if (hw->mac.type != e1000_pchlan)
2177                 return E1000_SUCCESS;
2178
2179         /* Wrap the whole flow with the sw flag */
2180         ret_val = hw->phy.ops.acquire(hw);
2181         if (ret_val)
2182                 return ret_val;
2183
2184         /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2185         if (link) {
2186                 if (hw->phy.type == e1000_phy_82578) {
2187                         ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2188                                                               &status_reg);
2189                         if (ret_val)
2190                                 goto release;
2191
2192                         status_reg &= (BM_CS_STATUS_LINK_UP |
2193                                        BM_CS_STATUS_RESOLVED |
2194                                        BM_CS_STATUS_SPEED_MASK);
2195
2196                         if (status_reg == (BM_CS_STATUS_LINK_UP |
2197                                            BM_CS_STATUS_RESOLVED |
2198                                            BM_CS_STATUS_SPEED_1000))
2199                                 k1_enable = false;
2200                 }
2201
2202                 if (hw->phy.type == e1000_phy_82577) {
2203                         ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2204                                                               &status_reg);
2205                         if (ret_val)
2206                                 goto release;
2207
2208                         status_reg &= (HV_M_STATUS_LINK_UP |
2209                                        HV_M_STATUS_AUTONEG_COMPLETE |
2210                                        HV_M_STATUS_SPEED_MASK);
2211
2212                         if (status_reg == (HV_M_STATUS_LINK_UP |
2213                                            HV_M_STATUS_AUTONEG_COMPLETE |
2214                                            HV_M_STATUS_SPEED_1000))
2215                                 k1_enable = false;
2216                 }
2217
2218                 /* Link stall fix for link up */
2219                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2220                                                        0x0100);
2221                 if (ret_val)
2222                         goto release;
2223
2224         } else {
2225                 /* Link stall fix for link down */
2226                 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2227                                                        0x4100);
2228                 if (ret_val)
2229                         goto release;
2230         }
2231
2232         ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2233
2234 release:
2235         hw->phy.ops.release(hw);
2236
2237         return ret_val;
2238 }
2239
2240 /**
2241  *  e1000_configure_k1_ich8lan - Configure K1 power state
2242  *  @hw: pointer to the HW structure
2243  *  @k1_enable: K1 state to configure
2244  *
2245  *  Configure the K1 power state based on the provided parameter.
2246  *  Assumes semaphore already acquired.
2247  *
2248  *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2249  **/
2250 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2251 {
2252         s32 ret_val;
2253         u32 ctrl_reg = 0;
2254         u32 ctrl_ext = 0;
2255         u32 reg = 0;
2256         u16 kmrn_reg = 0;
2257
2258         DEBUGFUNC("e1000_configure_k1_ich8lan");
2259
2260         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2261                                              &kmrn_reg);
2262         if (ret_val)
2263                 return ret_val;
2264
2265         if (k1_enable)
2266                 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2267         else
2268                 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2269
2270         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2271                                               kmrn_reg);
2272         if (ret_val)
2273                 return ret_val;
2274
2275         usec_delay(20);
2276         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2277         ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2278
2279         reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2280         reg |= E1000_CTRL_FRCSPD;
2281         E1000_WRITE_REG(hw, E1000_CTRL, reg);
2282
2283         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2284         E1000_WRITE_FLUSH(hw);
2285         usec_delay(20);
2286         E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2287         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2288         E1000_WRITE_FLUSH(hw);
2289         usec_delay(20);
2290
2291         return E1000_SUCCESS;
2292 }
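/* Usage sketch (illustrative only), mirroring e1000_k1_gig_workaround_hv()
 * above: the caller must hold the PHY semaphore around this helper, e.g.
 *
 *     ret_val = hw->phy.ops.acquire(hw);
 *     if (!ret_val) {
 *             ret_val = e1000_configure_k1_ich8lan(hw, false);
 *             hw->phy.ops.release(hw);
 *     }
 */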
2293
2294 /**
2295  *  e1000_oem_bits_config_ich8lan - Configure OEM bits (Gbe Disable and LPLU)
2296  *  @hw:       pointer to the HW structure
2297  *  @d0_state: true if the device is entering D0, false if entering D3
2298  *
2299  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2300  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2301  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2302  **/
2303 STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2304 {
2305         s32 ret_val = 0;
2306         u32 mac_reg;
2307         u16 oem_reg;
2308
2309         DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2310
2311         if (hw->mac.type < e1000_pchlan)
2312                 return ret_val;
2313
2314         ret_val = hw->phy.ops.acquire(hw);
2315         if (ret_val)
2316                 return ret_val;
2317
2318         if (hw->mac.type == e1000_pchlan) {
2319                 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2320                 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2321                         goto release;
2322         }
2323
2324         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2325         if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2326                 goto release;
2327
2328         mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2329
2330         ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2331         if (ret_val)
2332                 goto release;
2333
2334         oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2335
2336         if (d0_state) {
2337                 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2338                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2339
2340                 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2341                         oem_reg |= HV_OEM_BITS_LPLU;
2342         } else {
2343                 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2344                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2345                         oem_reg |= HV_OEM_BITS_GBE_DIS;
2346
2347                 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2348                     E1000_PHY_CTRL_NOND0A_LPLU))
2349                         oem_reg |= HV_OEM_BITS_LPLU;
2350         }
2351
2352         /* Set Restart auto-neg to activate the bits */
2353         if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2354             !hw->phy.ops.check_reset_block(hw))
2355                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2356
2357         ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2358
2359 release:
2360         hw->phy.ops.release(hw);
2361
2362         return ret_val;
2363 }
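/* Usage sketch (illustrative only): the ULP exit flow earlier in this file
 * reapplies the OEM bits for D0 with
 *
 *     ret_val = e1000_oem_bits_config_ich8lan(hw, true);
 *
 * and a suspend (D3/Sx) path is assumed to call the same helper with
 * d0_state set to false.
 */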
2364
2365
2366 /**
2367  *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2368  *  @hw:   pointer to the HW structure
2369  **/
2370 STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2371 {
2372         s32 ret_val;
2373         u16 data;
2374
2375         DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2376
2377         ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2378         if (ret_val)
2379                 return ret_val;
2380
2381         data |= HV_KMRN_MDIO_SLOW;
2382
2383         ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2384
2385         return ret_val;
2386 }
2387
2388 /**
2389  *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2390  *  done after every PHY reset.
2391  **/
2392 STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2393 {
2394         s32 ret_val = E1000_SUCCESS;
2395         u16 phy_data;
2396
2397         DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2398
2399         if (hw->mac.type != e1000_pchlan)
2400                 return E1000_SUCCESS;
2401
2402         /* Set MDIO slow mode before any other MDIO access */
2403         if (hw->phy.type == e1000_phy_82577) {
2404                 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2405                 if (ret_val)
2406                         return ret_val;
2407         }
2408
2409         if (((hw->phy.type == e1000_phy_82577) &&
2410              ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2411             ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2412                 /* Disable generation of early preamble */
2413                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2414                 if (ret_val)
2415                         return ret_val;
2416
2417                 /* Preamble tuning for SSC */
2418                 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2419                                                 0xA204);
2420                 if (ret_val)
2421                         return ret_val;
2422         }
2423
2424         if (hw->phy.type == e1000_phy_82578) {
2425                 /* Return registers to default by doing a soft reset then
2426                  * writing 0x3140 to the control register.
2427                  */
2428                 if (hw->phy.revision < 2) {
2429                         e1000_phy_sw_reset_generic(hw);
2430                         ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2431                                                         0x3140);
2432                 }
2433         }
2434
2435         /* Select page 0 */
2436         ret_val = hw->phy.ops.acquire(hw);
2437         if (ret_val)
2438                 return ret_val;
2439
2440         hw->phy.addr = 1;
2441         ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2442         hw->phy.ops.release(hw);
2443         if (ret_val)
2444                 return ret_val;
2445
2446         /* Configure the K1 Si workaround during phy reset assuming there is
2447          * link so that it disables K1 if link is at 1Gbps.
2448          */
2449         ret_val = e1000_k1_gig_workaround_hv(hw, true);
2450         if (ret_val)
2451                 return ret_val;
2452
2453         /* Workaround for link disconnects on a busy hub in half duplex */
2454         ret_val = hw->phy.ops.acquire(hw);
2455         if (ret_val)
2456                 return ret_val;
2457         ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2458         if (ret_val)
2459                 goto release;
2460         ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2461                                                phy_data & 0x00FF);
2462         if (ret_val)
2463                 goto release;
2464
2465         /* set MSE higher to enable link to stay up when noise is high */
2466         ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2467 release:
2468         hw->phy.ops.release(hw);
2469
2470         return ret_val;
2471 }
2472
2473 /**
2474  *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2475  *  @hw:   pointer to the HW structure
2476  **/
2477 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2478 {
2479         u32 mac_reg;
2480         u16 i, phy_reg = 0;
2481         s32 ret_val;
2482
2483         DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2484
2485         ret_val = hw->phy.ops.acquire(hw);
2486         if (ret_val)
2487                 return;
2488         ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2489         if (ret_val)
2490                 goto release;
2491
2492         /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2493         for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2494                 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2495                 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2496                                            (u16)(mac_reg & 0xFFFF));
2497                 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2498                                            (u16)((mac_reg >> 16) & 0xFFFF));
2499
2500                 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2501                 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2502                                            (u16)(mac_reg & 0xFFFF));
2503                 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2504                                            (u16)((mac_reg & E1000_RAH_AV)
2505                                                  >> 16));
2506         }
2507
2508         e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2509
2510 release:
2511         hw->phy.ops.release(hw);
2512 }
2513
2514 #ifndef CRC32_OS_SUPPORT
2515 STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2516 {
2517         u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
2518         u32 i, j, mask, crc;
2519
2520         DEBUGFUNC("e1000_calc_rx_da_crc");
2521
2522         crc = 0xffffffff;
2523         for (i = 0; i < 6; i++) {
2524                 crc = crc ^ mac[i];
2525                 for (j = 8; j > 0; j--) {
2526                         mask = (crc & 1) * (-1);
2527                         crc = (crc >> 1) ^ (poly & mask);
2528                 }
2529         }
2530         return ~crc;
2531 }
2532
2533 #endif /* CRC32_OS_SUPPORT */
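/* Usage sketch (hypothetical address, illustrative only), mirroring how the
 * jumbo-frame workaround below seeds the MAC with the CRC of each active
 * receive address:
 *
 *     u8 mac_addr[ETH_ADDR_LEN] = { 0x00, 0x1B, 0x21, 0xAA, 0xBB, 0xCC };
 *
 *     E1000_WRITE_REG(hw, E1000_PCH_RAICC(0),
 *                     e1000_calc_rx_da_crc(mac_addr));
 */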
2534 /**
2535  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2536  *  with 82579 PHY
2537  *  @hw: pointer to the HW structure
2538  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2539  **/
2540 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2541 {
2542         s32 ret_val = E1000_SUCCESS;
2543         u16 phy_reg, data;
2544         u32 mac_reg;
2545         u16 i;
2546
2547         DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2548
2549         if (hw->mac.type < e1000_pch2lan)
2550                 return E1000_SUCCESS;
2551
2552         /* disable Rx path while enabling/disabling workaround */
2553         hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2554         ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2555                                         phy_reg | (1 << 14));
2556         if (ret_val)
2557                 return ret_val;
2558
2559         if (enable) {
2560                 /* Write Rx addresses (rar_entry_count for RAL/H, and
2561                  * SHRAL/H) and initial CRC values to the MAC
2562                  */
2563                 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2564                         u8 mac_addr[ETH_ADDR_LEN] = {0};
2565                         u32 addr_high, addr_low;
2566
2567                         addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2568                         if (!(addr_high & E1000_RAH_AV))
2569                                 continue;
2570                         addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2571                         mac_addr[0] = (addr_low & 0xFF);
2572                         mac_addr[1] = ((addr_low >> 8) & 0xFF);
2573                         mac_addr[2] = ((addr_low >> 16) & 0xFF);
2574                         mac_addr[3] = ((addr_low >> 24) & 0xFF);
2575                         mac_addr[4] = (addr_high & 0xFF);
2576                         mac_addr[5] = ((addr_high >> 8) & 0xFF);
2577
2578 #ifndef CRC32_OS_SUPPORT
2579                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2580                                         e1000_calc_rx_da_crc(mac_addr));
2581 #else /* CRC32_OS_SUPPORT */
2582                         E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2583                                         E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2584 #endif /* CRC32_OS_SUPPORT */
2585                 }
2586
2587                 /* Write Rx addresses to the PHY */
2588                 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2589
2590                 /* Enable jumbo frame workaround in the MAC */
2591                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2592                 mac_reg &= ~(1 << 14);
2593                 mac_reg |= (7 << 15);
2594                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2595
2596                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2597                 mac_reg |= E1000_RCTL_SECRC;
2598                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2599
2600                 ret_val = e1000_read_kmrn_reg_generic(hw,
2601                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2602                                                 &data);
2603                 if (ret_val)
2604                         return ret_val;
2605                 ret_val = e1000_write_kmrn_reg_generic(hw,
2606                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2607                                                 data | (1 << 0));
2608                 if (ret_val)
2609                         return ret_val;
2610                 ret_val = e1000_read_kmrn_reg_generic(hw,
2611                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2612                                                 &data);
2613                 if (ret_val)
2614                         return ret_val;
2615                 data &= ~(0xF << 8);
2616                 data |= (0xB << 8);
2617                 ret_val = e1000_write_kmrn_reg_generic(hw,
2618                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2619                                                 data);
2620                 if (ret_val)
2621                         return ret_val;
2622
2623                 /* Enable jumbo frame workaround in the PHY */
2624                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2625                 data &= ~(0x7F << 5);
2626                 data |= (0x37 << 5);
2627                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2628                 if (ret_val)
2629                         return ret_val;
2630                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2631                 data &= ~(1 << 13);
2632                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2633                 if (ret_val)
2634                         return ret_val;
2635                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2636                 data &= ~(0x3FF << 2);
2637                 data |= (E1000_TX_PTR_GAP << 2);
2638                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2639                 if (ret_val)
2640                         return ret_val;
2641                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2642                 if (ret_val)
2643                         return ret_val;
2644                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2645                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2646                                                 (1 << 10));
2647                 if (ret_val)
2648                         return ret_val;
2649         } else {
2650                 /* Write MAC register values back to h/w defaults */
2651                 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2652                 mac_reg &= ~(0xF << 14);
2653                 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2654
2655                 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2656                 mac_reg &= ~E1000_RCTL_SECRC;
2657                 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2658
2659                 ret_val = e1000_read_kmrn_reg_generic(hw,
2660                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2661                                                 &data);
2662                 if (ret_val)
2663                         return ret_val;
2664                 ret_val = e1000_write_kmrn_reg_generic(hw,
2665                                                 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2666                                                 data & ~(1 << 0));
2667                 if (ret_val)
2668                         return ret_val;
2669                 ret_val = e1000_read_kmrn_reg_generic(hw,
2670                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2671                                                 &data);
2672                 if (ret_val)
2673                         return ret_val;
2674                 data &= ~(0xF << 8);
2675                 data |= (0xB << 8);
2676                 ret_val = e1000_write_kmrn_reg_generic(hw,
2677                                                 E1000_KMRNCTRLSTA_HD_CTRL,
2678                                                 data);
2679                 if (ret_val)
2680                         return ret_val;
2681
2682                 /* Write PHY register values back to h/w defaults */
2683                 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2684                 data &= ~(0x7F << 5);
2685                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2686                 if (ret_val)
2687                         return ret_val;
2688                 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2689                 data |= (1 << 13);
2690                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2691                 if (ret_val)
2692                         return ret_val;
2693                 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2694                 data &= ~(0x3FF << 2);
2695                 data |= (0x8 << 2);
2696                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2697                 if (ret_val)
2698                         return ret_val;
2699                 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2700                 if (ret_val)
2701                         return ret_val;
2702                 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2703                 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2704                                                 ~(1 << 10));
2705                 if (ret_val)
2706                         return ret_val;
2707         }
2708
2709         /* re-enable Rx path after enabling/disabling workaround */
2710         return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2711                                      ~(1 << 14));
2712 }
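
/* Illustrative sketch, not part of the shared base code: the jumbo-frame
 * workaround above repeats the same Kumeran (KMRN) read-modify-write
 * pattern several times.  The helper below is hypothetical and only shows
 * the shape of that pattern using the generic KMRN accessors.
 *
 *      static s32 example_kmrn_rmw(struct e1000_hw *hw, u32 offset,
 *                                  u16 clear_mask, u16 set_mask)
 *      {
 *              u16 data;
 *              s32 ret_val;
 *
 *              ret_val = e1000_read_kmrn_reg_generic(hw, offset, &data);
 *              if (ret_val)
 *                      return ret_val;
 *              data &= ~clear_mask;
 *              data |= set_mask;
 *              return e1000_write_kmrn_reg_generic(hw, offset, data);
 *      }
 */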
2713
2714 /**
2715  *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2716  *  done after every PHY reset.
2717  **/
2718 STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2719 {
2720         s32 ret_val = E1000_SUCCESS;
2721
2722         DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2723
2724         if (hw->mac.type != e1000_pch2lan)
2725                 return E1000_SUCCESS;
2726
2727         /* Set MDIO slow mode before any other MDIO access */
2728         ret_val = e1000_set_mdio_slow_mode_hv(hw);
2729         if (ret_val)
2730                 return ret_val;
2731
2732         ret_val = hw->phy.ops.acquire(hw);
2733         if (ret_val)
2734                 return ret_val;
2735         /* set MSE higher to enable link to stay up when noise is high */
2736         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2737         if (ret_val)
2738                 goto release;
2739         /* drop link after 5 times MSE threshold was reached */
2740         ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2741 release:
2742         hw->phy.ops.release(hw);
2743
2744         return ret_val;
2745 }
2746
2747 /**
2748  *  e1000_k1_workaround_lv - K1 Si workaround
2749  *  @hw:   pointer to the HW structure
2750  *
2751  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
2752  *  Disable K1 for 1000Mbps and 100Mbps speeds.
2753  **/
2754 STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2755 {
2756         s32 ret_val = E1000_SUCCESS;
2757         u16 status_reg = 0;
2758
2759         DEBUGFUNC("e1000_k1_workaround_lv");
2760
2761         if (hw->mac.type != e1000_pch2lan)
2762                 return E1000_SUCCESS;
2763
2764         /* Set K1 beacon duration based on 10Mbps speed */
2765         ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2766         if (ret_val)
2767                 return ret_val;
2768
2769         if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2770             == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2771                 if (status_reg &
2772                     (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2773                         u16 pm_phy_reg;
2774
2775                         /* LV 1G/100 packet drop issue workaround */
2776                         ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2777                                                        &pm_phy_reg);
2778                         if (ret_val)
2779                                 return ret_val;
2780                         pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2781                         ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2782                                                         pm_phy_reg);
2783                         if (ret_val)
2784                                 return ret_val;
2785                 } else {
2786                         u32 mac_reg;
2787                         mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2788                         mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2789                         mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2790                         E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2791                 }
2792         }
2793
2794         return ret_val;
2795 }
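
/* Illustrative sketch, not part of the shared base code: K1 is controlled by
 * the HV_PM_CTRL_K1_ENABLE bit of the HV_PM_CTRL PHY register, and the
 * workaround above clears it for 1000/100 links.  The hypothetical helper
 * below restates that read-modify-write pattern through the phy.ops
 * accessors.
 *
 *      static s32 example_set_k1(struct e1000_hw *hw, bool enable)
 *      {
 *              u16 pm_phy_reg;
 *              s32 ret_val;
 *
 *              ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL, &pm_phy_reg);
 *              if (ret_val)
 *                      return ret_val;
 *              if (enable)
 *                      pm_phy_reg |= HV_PM_CTRL_K1_ENABLE;
 *              else
 *                      pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
 *              return hw->phy.ops.write_reg(hw, HV_PM_CTRL, pm_phy_reg);
 *      }
 */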
2796
2797 /**
2798  *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2799  *  @hw:   pointer to the HW structure
2800  *  @gate: boolean set to true to gate, false to ungate
2801  *
2802  *  Gate/ungate the automatic PHY configuration via hardware; perform
2803  *  the configuration via software instead.
2804  **/
2805 STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2806 {
2807         u32 extcnf_ctrl;
2808
2809         DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
2810
2811         if (hw->mac.type < e1000_pch2lan)
2812                 return;
2813
2814         extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2815
2816         if (gate)
2817                 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2818         else
2819                 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2820
2821         E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
2822 }
2823
2824 /**
2825  *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2826  *  @hw: pointer to the HW structure
2827  *
2828  *  Check the appropriate indication the MAC has finished configuring the
2829  *  PHY after a software reset.
2830  **/
2831 STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2832 {
2833         u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2834
2835         DEBUGFUNC("e1000_lan_init_done_ich8lan");
2836
2837         /* Wait for basic configuration to complete before proceeding */
2838         do {
2839                 data = E1000_READ_REG(hw, E1000_STATUS);
2840                 data &= E1000_STATUS_LAN_INIT_DONE;
2841                 usec_delay(100);
2842         } while ((!data) && --loop);
2843
2844         /* If basic configuration is incomplete before the above loop
2845          * count reaches 0, loading the configuration from NVM will
2846          * leave the PHY in a bad state possibly resulting in no link.
2847          */
2848         if (loop == 0)
2849                 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
2850
2851         /* Clear the Init Done bit for the next init event */
2852         data = E1000_READ_REG(hw, E1000_STATUS);
2853         data &= ~E1000_STATUS_LAN_INIT_DONE;
2854         E1000_WRITE_REG(hw, E1000_STATUS, data);
2855 }
2856
2857 /**
2858  *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2859  *  @hw: pointer to the HW structure
2860  **/
2861 STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2862 {
2863         s32 ret_val = E1000_SUCCESS;
2864         u16 reg;
2865
2866         DEBUGFUNC("e1000_post_phy_reset_ich8lan");
2867
2868         if (hw->phy.ops.check_reset_block(hw))
2869                 return E1000_SUCCESS;
2870
2871         /* Allow time for h/w to get to quiescent state after reset */
2872         msec_delay(10);
2873
2874         /* Perform any necessary post-reset workarounds */
2875         switch (hw->mac.type) {
2876         case e1000_pchlan:
2877                 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2878                 if (ret_val)
2879                         return ret_val;
2880                 break;
2881         case e1000_pch2lan:
2882                 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2883                 if (ret_val)
2884                         return ret_val;
2885                 break;
2886         default:
2887                 break;
2888         }
2889
2890         /* Clear the host wakeup bit after lcd reset */
2891         if (hw->mac.type >= e1000_pchlan) {
2892                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
2893                 reg &= ~BM_WUC_HOST_WU_BIT;
2894                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
2895         }
2896
2897         /* Configure the LCD with the extended configuration region in NVM */
2898         ret_val = e1000_sw_lcd_config_ich8lan(hw);
2899         if (ret_val)
2900                 return ret_val;
2901
2902         /* Configure the LCD with the OEM bits in NVM */
2903         ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2904
2905         if (hw->mac.type == e1000_pch2lan) {
2906                 /* Ungate automatic PHY configuration on non-managed 82579 */
2907                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
2908                     E1000_ICH_FWSM_FW_VALID)) {
2909                         msec_delay(10);
2910                         e1000_gate_hw_phy_config_ich8lan(hw, false);
2911                 }
2912
2913                 /* Set EEE LPI Update Timer to 200usec */
2914                 ret_val = hw->phy.ops.acquire(hw);
2915                 if (ret_val)
2916                         return ret_val;
2917                 ret_val = e1000_write_emi_reg_locked(hw,
2918                                                      I82579_LPI_UPDATE_TIMER,
2919                                                      0x1387);
2920                 hw->phy.ops.release(hw);
2921         }
2922
2923         return ret_val;
2924 }
2925
2926 /**
2927  *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2928  *  @hw: pointer to the HW structure
2929  *
2930  *  Resets the PHY
2931  *  This is a function pointer entry point called by drivers
2932  *  or other shared routines.
2933  **/
2934 STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2935 {
2936         s32 ret_val = E1000_SUCCESS;
2937
2938         DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
2939
2940         /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2941         if ((hw->mac.type == e1000_pch2lan) &&
2942             !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
2943                 e1000_gate_hw_phy_config_ich8lan(hw, true);
2944
2945         ret_val = e1000_phy_hw_reset_generic(hw);
2946         if (ret_val)
2947                 return ret_val;
2948
2949         return e1000_post_phy_reset_ich8lan(hw);
2950 }
2951
2952 /**
2953  *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2954  *  @hw: pointer to the HW structure
2955  *  @active: true to enable LPLU, false to disable
2956  *
2957  *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2958  *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
2959  *  the PHY speed. This function will manually set the LPLU bit and restart
2960  *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2961  *  since it configures the same bit.
2962  **/
2963 STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2964 {
2965         s32 ret_val;
2966         u16 oem_reg;
2967
2968         DEBUGFUNC("e1000_set_lplu_state_pchlan");
2969
2970         ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
2971         if (ret_val)
2972                 return ret_val;
2973
2974         if (active)
2975                 oem_reg |= HV_OEM_BITS_LPLU;
2976         else
2977                 oem_reg &= ~HV_OEM_BITS_LPLU;
2978
2979         if (!hw->phy.ops.check_reset_block(hw))
2980                 oem_reg |= HV_OEM_BITS_RESTART_AN;
2981
2982         return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
2983 }
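
/* Illustrative note, based on the comment above: because D0 and D3 LPLU are
 * configured through the same HV_OEM_BITS bit, the PHY init code (not shown
 * in this excerpt) can point both LPLU function pointers at the routine
 * above, e.g.:
 *
 *      phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
 *      phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
 *
 * Callers then invoke hw->phy.ops.set_d0_lplu_state(hw, active) or
 * hw->phy.ops.set_d3_lplu_state(hw, active) without caring which power
 * state is being configured.
 */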
2984
2985 /**
2986  *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2987  *  @hw: pointer to the HW structure
2988  *  @active: true to enable LPLU, false to disable
2989  *
2990  *  Sets the LPLU D0 state according to the active flag.  When
2991  *  activating LPLU this function also disables smart speed
2992  *  and vice versa.  LPLU will not be activated unless the
2993  *  device autonegotiation advertisement meets standards of
2994  *  either 10, 10/100, or 10/100/1000 at all duplexes.
2995  *  This is a function pointer entry point only called by
2996  *  PHY setup routines.
2997  **/
2998 STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2999 {
3000         struct e1000_phy_info *phy = &hw->phy;
3001         u32 phy_ctrl;
3002         s32 ret_val = E1000_SUCCESS;
3003         u16 data;
3004
3005         DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3006
3007         if (phy->type == e1000_phy_ife)
3008                 return E1000_SUCCESS;
3009
3010         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3011
3012         if (active) {
3013                 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3014                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3015
3016                 if (phy->type != e1000_phy_igp_3)
3017                         return E1000_SUCCESS;
3018
3019                 /* Call gig speed drop workaround on LPLU before accessing
3020                  * any PHY registers
3021                  */
3022                 if (hw->mac.type == e1000_ich8lan)
3023                         e1000_gig_downshift_workaround_ich8lan(hw);
3024
3025                 /* When LPLU is enabled, we should disable SmartSpeed */
3026                 ret_val = phy->ops.read_reg(hw,
3027                                             IGP01E1000_PHY_PORT_CONFIG,
3028                                             &data);
3029                 if (ret_val)
3030                         return ret_val;
3031                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3032                 ret_val = phy->ops.write_reg(hw,
3033                                              IGP01E1000_PHY_PORT_CONFIG,
3034                                              data);
3035                 if (ret_val)
3036                         return ret_val;
3037         } else {
3038                 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3039                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3040
3041                 if (phy->type != e1000_phy_igp_3)
3042                         return E1000_SUCCESS;
3043
3044                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3045                  * during Dx states where the power conservation is most
3046                  * important.  During driver activity we should enable
3047                  * SmartSpeed, so performance is maintained.
3048                  */
3049                 if (phy->smart_speed == e1000_smart_speed_on) {
3050                         ret_val = phy->ops.read_reg(hw,
3051                                                     IGP01E1000_PHY_PORT_CONFIG,
3052                                                     &data);
3053                         if (ret_val)
3054                                 return ret_val;
3055
3056                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3057                         ret_val = phy->ops.write_reg(hw,
3058                                                      IGP01E1000_PHY_PORT_CONFIG,
3059                                                      data);
3060                         if (ret_val)
3061                                 return ret_val;
3062                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3063                         ret_val = phy->ops.read_reg(hw,
3064                                                     IGP01E1000_PHY_PORT_CONFIG,
3065                                                     &data);
3066                         if (ret_val)
3067                                 return ret_val;
3068
3069                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3070                         ret_val = phy->ops.write_reg(hw,
3071                                                      IGP01E1000_PHY_PORT_CONFIG,
3072                                                      data);
3073                         if (ret_val)
3074                                 return ret_val;
3075                 }
3076         }
3077
3078         return E1000_SUCCESS;
3079 }
3080
3081 /**
3082  *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3083  *  @hw: pointer to the HW structure
3084  *  @active: true to enable LPLU, false to disable
3085  *
3086  *  Sets the LPLU D3 state according to the active flag.  When
3087  *  activating LPLU this function also disables smart speed
3088  *  and vice versa.  LPLU will not be activated unless the
3089  *  device autonegotiation advertisement meets standards of
3090  *  either 10, 10/100, or 10/100/1000 at all duplexes.
3091  *  This is a function pointer entry point only called by
3092  *  PHY setup routines.
3093  **/
3094 STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3095 {
3096         struct e1000_phy_info *phy = &hw->phy;
3097         u32 phy_ctrl;
3098         s32 ret_val = E1000_SUCCESS;
3099         u16 data;
3100
3101         DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3102
3103         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3104
3105         if (!active) {
3106                 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3107                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3108
3109                 if (phy->type != e1000_phy_igp_3)
3110                         return E1000_SUCCESS;
3111
3112                 /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3113                  * during Dx states where the power conservation is most
3114                  * important.  During driver activity we should enable
3115                  * SmartSpeed, so performance is maintained.
3116                  */
3117                 if (phy->smart_speed == e1000_smart_speed_on) {
3118                         ret_val = phy->ops.read_reg(hw,
3119                                                     IGP01E1000_PHY_PORT_CONFIG,
3120                                                     &data);
3121                         if (ret_val)
3122                                 return ret_val;
3123
3124                         data |= IGP01E1000_PSCFR_SMART_SPEED;
3125                         ret_val = phy->ops.write_reg(hw,
3126                                                      IGP01E1000_PHY_PORT_CONFIG,
3127                                                      data);
3128                         if (ret_val)
3129                                 return ret_val;
3130                 } else if (phy->smart_speed == e1000_smart_speed_off) {
3131                         ret_val = phy->ops.read_reg(hw,
3132                                                     IGP01E1000_PHY_PORT_CONFIG,
3133                                                     &data);
3134                         if (ret_val)
3135                                 return ret_val;
3136
3137                         data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3138                         ret_val = phy->ops.write_reg(hw,
3139                                                      IGP01E1000_PHY_PORT_CONFIG,
3140                                                      data);
3141                         if (ret_val)
3142                                 return ret_val;
3143                 }
3144         } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3145                    (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3146                    (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3147                 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3148                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3149
3150                 if (phy->type != e1000_phy_igp_3)
3151                         return E1000_SUCCESS;
3152
3153                 /* Call gig speed drop workaround on LPLU before accessing
3154                  * any PHY registers
3155                  */
3156                 if (hw->mac.type == e1000_ich8lan)
3157                         e1000_gig_downshift_workaround_ich8lan(hw);
3158
3159                 /* When LPLU is enabled, we should disable SmartSpeed */
3160                 ret_val = phy->ops.read_reg(hw,
3161                                             IGP01E1000_PHY_PORT_CONFIG,
3162                                             &data);
3163                 if (ret_val)
3164                         return ret_val;
3165
3166                 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3167                 ret_val = phy->ops.write_reg(hw,
3168                                              IGP01E1000_PHY_PORT_CONFIG,
3169                                              data);
3170         }
3171
3172         return ret_val;
3173 }
3174
3175 /**
3176  *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3177  *  @hw: pointer to the HW structure
3178  *  @bank:  pointer to the variable that returns the active bank
3179  *
3180  *  Reads signature byte from the NVM using the flash access registers.
3181  *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3182  **/
3183 STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3184 {
3185         u32 eecd;
3186         struct e1000_nvm_info *nvm = &hw->nvm;
3187         u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3188         u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3189         u8 sig_byte = 0;
3190         s32 ret_val;
3191
3192         DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3193
3194         switch (hw->mac.type) {
3195         case e1000_ich8lan:
3196         case e1000_ich9lan:
3197                 eecd = E1000_READ_REG(hw, E1000_EECD);
3198                 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3199                     E1000_EECD_SEC1VAL_VALID_MASK) {
3200                         if (eecd & E1000_EECD_SEC1VAL)
3201                                 *bank = 1;
3202                         else
3203                                 *bank = 0;
3204
3205                         return E1000_SUCCESS;
3206                 }
3207                 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3208                 /* fall-thru */
3209         default:
3210                 /* set bank to 0 in case flash read fails */
3211                 *bank = 0;
3212
3213                 /* Check bank 0 */
3214                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3215                                                         &sig_byte);
3216                 if (ret_val)
3217                         return ret_val;
3218                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3219                     E1000_ICH_NVM_SIG_VALUE) {
3220                         *bank = 0;
3221                         return E1000_SUCCESS;
3222                 }
3223
3224                 /* Check bank 1 */
3225                 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3226                                                         bank1_offset,
3227                                                         &sig_byte);
3228                 if (ret_val)
3229                         return ret_val;
3230                 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3231                     E1000_ICH_NVM_SIG_VALUE) {
3232                         *bank = 1;
3233                         return E1000_SUCCESS;
3234                 }
3235
3236                 DEBUGOUT("ERROR: No valid NVM bank present\n");
3237                 return -E1000_ERR_NVM;
3238         }
3239 }
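
/* Worked example of the signature check above, for reference only: the
 * valid-bank signature is bits 15:14 of NVM word 0x13 (E1000_ICH_NVM_SIG_WORD),
 * so the routine reads that word's high byte at byte offset
 *
 *      act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1        (0x27 in bank 0)
 *
 * and accepts the bank when its two most significant bits read 10b:
 *
 *      (sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE
 *
 * For bank 1 the same byte is read bank1_offset (flash_bank_size * 2 bytes)
 * further into the flash.
 */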
3240
3241 /**
3242  *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3243  *  @hw: pointer to the HW structure
3244  *  @offset: The offset (in bytes) of the word(s) to read.
3245  *  @words: Size of data to read in words
3246  *  @data: Pointer to the word(s) to read at offset.
3247  *
3248  *  Reads a word(s) from the NVM using the flash access registers.
3249  **/
3250 STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3251                                   u16 *data)
3252 {
3253         struct e1000_nvm_info *nvm = &hw->nvm;
3254         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3255         u32 act_offset;
3256         s32 ret_val = E1000_SUCCESS;
3257         u32 bank = 0;
3258         u16 i, word;
3259
3260         DEBUGFUNC("e1000_read_nvm_ich8lan");
3261
3262         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3263             (words == 0)) {
3264                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3265                 ret_val = -E1000_ERR_NVM;
3266                 goto out;
3267         }
3268
3269         nvm->ops.acquire(hw);
3270
3271         ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3272         if (ret_val != E1000_SUCCESS) {
3273                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3274                 bank = 0;
3275         }
3276
3277         act_offset = (bank) ? nvm->flash_bank_size : 0;
3278         act_offset += offset;
3279
3280         ret_val = E1000_SUCCESS;
3281         for (i = 0; i < words; i++) {
3282                 if (dev_spec->shadow_ram[offset+i].modified) {
3283                         data[i] = dev_spec->shadow_ram[offset+i].value;
3284                 } else {
3285                         ret_val = e1000_read_flash_word_ich8lan(hw,
3286                                                                 act_offset + i,
3287                                                                 &word);
3288                         if (ret_val)
3289                                 break;
3290                         data[i] = word;
3291                 }
3292         }
3293
3294         nvm->ops.release(hw);
3295
3296 out:
3297         if (ret_val)
3298                 DEBUGOUT1("NVM read error: %d\n", ret_val);
3299
3300         return ret_val;
3301 }
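
/* Illustrative usage sketch: callers go through the nvm.ops.read function
 * pointer rather than calling this routine directly.  Reading a single word
 * looks like the LED-default read later in this file:
 *
 *      u16 data;
 *      s32 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, &data);
 *      if (ret_val)
 *              return ret_val;
 *
 * Note that words whose shadow-RAM entry is marked modified are returned
 * from the shadow RAM, so a read issued after e1000_write_nvm_ich8lan sees
 * the pending, not yet committed, value.
 */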
3302
3303 /**
3304  *  e1000_flash_cycle_init_ich8lan - Initialize flash
3305  *  @hw: pointer to the HW structure
3306  *
3307  *  This function does initial flash setup so that a new read/write/erase cycle
3308  *  can be started.
3309  **/
3310 STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3311 {
3312         union ich8_hws_flash_status hsfsts;
3313         s32 ret_val = -E1000_ERR_NVM;
3314
3315         DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3316
3317         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3318
3319         /* Check if the flash descriptor is valid */
3320         if (!hsfsts.hsf_status.fldesvalid) {
3321                 DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3322                 return -E1000_ERR_NVM;
3323         }
3324
3325         /* Clear FCERR and DAEL in hw status by writing 1 */
3326         hsfsts.hsf_status.flcerr = 1;
3327         hsfsts.hsf_status.dael = 1;
3328         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3329
3330         /* Either we should have a hardware SPI cycle-in-progress
3331          * bit to check against in order to start a new cycle, or
3332          * the FDONE bit should be set by the hardware so that it
3333          * is 1 after a hardware reset, which can then be used as an
3334          * indication of whether a cycle is in progress or has been
3335          * completed.
3336          */
3337
3338         if (!hsfsts.hsf_status.flcinprog) {
3339                 /* There is no cycle running at present,
3340                  * so we can start a cycle.
3341                  * Begin by setting Flash Cycle Done.
3342                  */
3343                 hsfsts.hsf_status.flcdone = 1;
3344                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3345                 ret_val = E1000_SUCCESS;
3346         } else {
3347                 s32 i;
3348
3349                 /* Otherwise poll for some time so the current
3350                  * cycle has a chance to end before giving up.
3351                  */
3352                 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3353                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3354                                                               ICH_FLASH_HSFSTS);
3355                         if (!hsfsts.hsf_status.flcinprog) {
3356                                 ret_val = E1000_SUCCESS;
3357                                 break;
3358                         }
3359                         usec_delay(1);
3360                 }
3361                 if (ret_val == E1000_SUCCESS) {
3362                         /* The previous cycle ended within the timeout,
3363                          * so now set the Flash Cycle Done.
3364                          */
3365                         hsfsts.hsf_status.flcdone = 1;
3366                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3367                                                 hsfsts.regval);
3368                 } else {
3369                         DEBUGOUT("Flash controller busy, cannot get access\n");
3370                 }
3371         }
3372
3373         return ret_val;
3374 }
3375
3376 /**
3377  *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3378  *  @hw: pointer to the HW structure
3379  *  @timeout: maximum time to wait for completion
3380  *
3381  *  This function starts a flash cycle and waits for its completion.
3382  **/
3383 STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3384 {
3385         union ich8_hws_flash_ctrl hsflctl;
3386         union ich8_hws_flash_status hsfsts;
3387         u32 i = 0;
3388
3389         DEBUGFUNC("e1000_flash_cycle_ich8lan");
3390
3391         /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3392         hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3393         hsflctl.hsf_ctrl.flcgo = 1;
3394
3395         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3396
3397         /* wait till FDONE bit is set to 1 */
3398         do {
3399                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3400                 if (hsfsts.hsf_status.flcdone)
3401                         break;
3402                 usec_delay(1);
3403         } while (i++ < timeout);
3404
3405         if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3406                 return E1000_SUCCESS;
3407
3408         return -E1000_ERR_NVM;
3409 }
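
/* Illustrative summary of how the two helpers above are used by the
 * read/write/erase paths later in this file.  Every flash access follows the
 * same sequence:
 *
 *      e1000_flash_cycle_init_ich8lan(hw);          clear errors, wait idle
 *      hsflctl.hsf_ctrl.fldbcount = size - 1;       1 or 2 bytes (read/write)
 *      hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;   or WRITE / ERASE
 *      E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
 *      E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
 *      e1000_flash_cycle_ich8lan(hw, timeout);      sets FLCGO, polls FDONE
 *
 * Writes additionally load FDATA0 before starting the cycle, and reads pull
 * the result out of FDATA0 afterwards.  On failure the callers re-check
 * HSFSTS and retry up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
 */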
3410
3411 /**
3412  *  e1000_read_flash_word_ich8lan - Read word from flash
3413  *  @hw: pointer to the HW structure
3414  *  @offset: offset to data location
3415  *  @data: pointer to the location for storing the data
3416  *
3417  *  Reads the flash word at offset into data.  Offset is converted
3418  *  to bytes before read.
3419  **/
3420 STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3421                                          u16 *data)
3422 {
3423         DEBUGFUNC("e1000_read_flash_word_ich8lan");
3424
3425         if (!data)
3426                 return -E1000_ERR_NVM;
3427
3428         /* Must convert offset into bytes. */
3429         offset <<= 1;
3430
3431         return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3432 }
3433
3434 /**
3435  *  e1000_read_flash_byte_ich8lan - Read byte from flash
3436  *  @hw: pointer to the HW structure
3437  *  @offset: The offset of the byte to read.
3438  *  @data: Pointer to a byte to store the value read.
3439  *
3440  *  Reads a single byte from the NVM using the flash access registers.
3441  **/
3442 STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3443                                          u8 *data)
3444 {
3445         s32 ret_val;
3446         u16 word = 0;
3447
3448         ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3449
3450         if (ret_val)
3451                 return ret_val;
3452
3453         *data = (u8)word;
3454
3455         return E1000_SUCCESS;
3456 }
3457
3458 /**
3459  *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3460  *  @hw: pointer to the HW structure
3461  *  @offset: The offset (in bytes) of the byte or word to read.
3462  *  @size: Size of data to read, 1=byte 2=word
3463  *  @data: Pointer to the word to store the value read.
3464  *
3465  *  Reads a byte or word from the NVM using the flash access registers.
3466  **/
3467 STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3468                                          u8 size, u16 *data)
3469 {
3470         union ich8_hws_flash_status hsfsts;
3471         union ich8_hws_flash_ctrl hsflctl;
3472         u32 flash_linear_addr;
3473         u32 flash_data = 0;
3474         s32 ret_val = -E1000_ERR_NVM;
3475         u8 count = 0;
3476
3477         DEBUGFUNC("e1000_read_flash_data_ich8lan");
3478
3479         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3480                 return -E1000_ERR_NVM;
3481         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3482                              hw->nvm.flash_base_addr);
3483
3484         do {
3485                 usec_delay(1);
3486                 /* Steps */
3487                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3488                 if (ret_val != E1000_SUCCESS)
3489                         break;
3490                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3491
3492                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3493                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3494                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3495                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3496
3497                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3498
3499                 ret_val =
3500                     e1000_flash_cycle_ich8lan(hw,
3501                                               ICH_FLASH_READ_COMMAND_TIMEOUT);
3502
3503                 /* If FCERR is set to 1, clear it and try the whole
3504                  * sequence a few more times; otherwise read the data
3505                  * out of the Flash Data0 register, least significant
3506                  * byte first.
3507                  */
3508                 if (ret_val == E1000_SUCCESS) {
3509                         flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3510                         if (size == 1)
3511                                 *data = (u8)(flash_data & 0x000000FF);
3512                         else if (size == 2)
3513                                 *data = (u16)(flash_data & 0x0000FFFF);
3514                         break;
3515                 } else {
3516                         /* If we've gotten here, then things are probably
3517                          * completely hosed, but if the error condition is
3518                          * detected, it won't hurt to give it another try...
3519                          * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3520                          */
3521                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3522                                                               ICH_FLASH_HSFSTS);
3523                         if (hsfsts.hsf_status.flcerr) {
3524                                 /* Repeat for some time before giving up. */
3525                                 continue;
3526                         } else if (!hsfsts.hsf_status.flcdone) {
3527                                 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3528                                 break;
3529                         }
3530                 }
3531         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3532
3533         return ret_val;
3534 }
3535
3536 /**
3537  *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3538  *  @hw: pointer to the HW structure
3539  *  @offset: The offset (in bytes) of the word(s) to write.
3540  *  @words: Size of data to write in words
3541  *  @data: Pointer to the word(s) to write at offset.
3542  *
3543  *  Writes a byte or word to the NVM using the flash access registers.
3544  **/
3545 STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3546                                    u16 *data)
3547 {
3548         struct e1000_nvm_info *nvm = &hw->nvm;
3549         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3550         u16 i;
3551
3552         DEBUGFUNC("e1000_write_nvm_ich8lan");
3553
3554         if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3555             (words == 0)) {
3556                 DEBUGOUT("nvm parameter(s) out of bounds\n");
3557                 return -E1000_ERR_NVM;
3558         }
3559
3560         nvm->ops.acquire(hw);
3561
3562         for (i = 0; i < words; i++) {
3563                 dev_spec->shadow_ram[offset+i].modified = true;
3564                 dev_spec->shadow_ram[offset+i].value = data[i];
3565         }
3566
3567         nvm->ops.release(hw);
3568
3569         return E1000_SUCCESS;
3570 }
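
/* Illustrative sketch of the two-step write model implied above: this
 * routine only stages data in the shadow RAM, and the flash itself is not
 * touched until e1000_update_nvm_checksum_ich8lan (reached through
 * nvm.ops.update) erases the spare bank and commits the pending words.  A
 * typical caller therefore pairs the two, as the checksum fix-up below does:
 *
 *      u16 word = 0x1234;
 *      s32 ret_val = hw->nvm.ops.write(hw, offset, 1, &word);
 *      if (ret_val)
 *              return ret_val;
 *      return hw->nvm.ops.update(hw);
 *
 * The value 0x1234 and the variable offset are placeholders for this sketch.
 */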
3571
3572 /**
3573  *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3574  *  @hw: pointer to the HW structure
3575  *
3576  *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3577  *  which writes the checksum to the shadow ram.  The changes in the shadow
3578  *  ram are then committed to the EEPROM by processing each bank at a time
3579  *  checking for the modified bit and writing only the pending changes.
3580  *  After a successful commit, the shadow ram is cleared and is ready for
3581  *  future writes.
3582  **/
3583 STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3584 {
3585         struct e1000_nvm_info *nvm = &hw->nvm;
3586         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3587         u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3588         s32 ret_val;
3589         u16 data;
3590
3591         DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
3592
3593         ret_val = e1000_update_nvm_checksum_generic(hw);
3594         if (ret_val)
3595                 goto out;
3596
3597         if (nvm->type != e1000_nvm_flash_sw)
3598                 goto out;
3599
3600         nvm->ops.acquire(hw);
3601
3602         /* We're writing to the opposite bank so if we're on bank 1,
3603          * write to bank 0 etc.  We also need to erase the segment that
3604          * is going to be written
3605          */
3606         ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3607         if (ret_val != E1000_SUCCESS) {
3608                 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3609                 bank = 0;
3610         }
3611
3612         if (bank == 0) {
3613                 new_bank_offset = nvm->flash_bank_size;
3614                 old_bank_offset = 0;
3615                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3616                 if (ret_val)
3617                         goto release;
3618         } else {
3619                 old_bank_offset = nvm->flash_bank_size;
3620                 new_bank_offset = 0;
3621                 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3622                 if (ret_val)
3623                         goto release;
3624         }
3625
3626         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3627                 /* Determine whether to write the value stored
3628                  * in the other NVM bank or a modified value stored
3629                  * in the shadow RAM
3630                  */
3631                 if (dev_spec->shadow_ram[i].modified) {
3632                         data = dev_spec->shadow_ram[i].value;
3633                 } else {
3634                         ret_val = e1000_read_flash_word_ich8lan(hw, i +
3635                                                                 old_bank_offset,
3636                                                                 &data);
3637                         if (ret_val)
3638                                 break;
3639                 }
3640
3641                 /* If this is the signature word (offset 0x13), make sure
3642                  * the signature bits (15:14) are 11b until the commit has completed.
3643                  * This will allow us to write 10b which indicates the
3644                  * signature is valid.  We want to do this after the write
3645                  * has completed so that we don't mark the segment valid
3646                  * while the write is still in progress
3647                  */
3648                 if (i == E1000_ICH_NVM_SIG_WORD)
3649                         data |= E1000_ICH_NVM_SIG_MASK;
3650
3651                 /* Convert offset to bytes. */
3652                 act_offset = (i + new_bank_offset) << 1;
3653
3654                 usec_delay(100);
3655                 /* Write the bytes to the new bank. */
3656                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3657                                                                act_offset,
3658                                                                (u8)data);
3659                 if (ret_val)
3660                         break;
3661
3662                 usec_delay(100);
3663                 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3664                                                           act_offset + 1,
3665                                                           (u8)(data >> 8));
3666                 if (ret_val)
3667                         break;
3668         }
3669
3670         /* Don't bother writing the segment valid bits if sector
3671          * programming failed.
3672          */
3673         if (ret_val) {
3674                 DEBUGOUT("Flash commit failed.\n");
3675                 goto release;
3676         }
3677
3678         /* Finally, validate the new segment by setting bits 15:14
3679          * to 10b in word 0x13.  This can be done without an
3680          * erase because these bits are 11b to start with
3681          * and we only need to change bit 14 to 0b.
3682          */
3683         act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3684         ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3685         if (ret_val)
3686                 goto release;
3687
3688         data &= 0xBFFF;
3689         ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3690                                                        act_offset * 2 + 1,
3691                                                        (u8)(data >> 8));
3692         if (ret_val)
3693                 goto release;
3694
3695         /* And invalidate the previously valid segment by setting
3696          * the high byte of its signature word (0x13) to 0.  This can be
3697          * done without an erase because a flash erase sets all bits
3698          * to 1's, and 1's can be written to 0's without an erase.
3699          */
3700         act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3701         ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3702         if (ret_val)
3703                 goto release;
3704
3705         /* Great!  Everything worked, we can now clear the cached entries. */
3706         for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
3707                 dev_spec->shadow_ram[i].modified = false;
3708                 dev_spec->shadow_ram[i].value = 0xFFFF;
3709         }
3710
3711 release:
3712         nvm->ops.release(hw);
3713
3714         /* Reload the EEPROM, or else modifications will not appear
3715          * until after the next adapter reset.
3716          */
3717         if (!ret_val) {
3718                 nvm->ops.reload(hw);
3719                 msec_delay(10);
3720         }
3721
3722 out:
3723         if (ret_val)
3724                 DEBUGOUT1("NVM update error: %d\n", ret_val);
3725
3726         return ret_val;
3727 }
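
/* Worked example of the commit-offset arithmetic above, assuming (for
 * illustration only) flash_bank_size = 2048 words and bank 0 currently
 * valid, i.e. new_bank_offset = 2048:
 *
 *      word i           -> byte offset (i + 2048) << 1 = 4096 + 2 * i
 *      signature word   -> act_offset = 2048 + 0x13, high byte rewritten at
 *                          act_offset * 2 + 1 with bit 6 cleared, giving
 *                          10b in word bits 15:14
 *
 * Clearing only bit 14 works without an erase because erased flash bits read
 * as 1's and programming can only turn 1's into 0's.
 */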
3728
3729 /**
3730  *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3731  *  @hw: pointer to the HW structure
3732  *
3733  *  Check to see if the checksum needs to be fixed by reading bit 6 in word 0x19.
3734  *  If the bit is 0, the EEPROM had been modified but the checksum was not
3735  *  calculated, in which case we need to calculate the checksum and set bit 6.
3736  **/
3737 STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3738 {
3739         s32 ret_val;
3740         u16 data;
3741         u16 word;
3742         u16 valid_csum_mask;
3743
3744         DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
3745
3746         /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3747          * the checksum needs to be fixed.  This bit is an indication that
3748          * the NVM was prepared by OEM software and did not calculate
3749          * the checksum...a likely scenario.
3750          */
3751         switch (hw->mac.type) {
3752         case e1000_pch_lpt:
3753                 word = NVM_COMPAT;
3754                 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3755                 break;
3756         default:
3757                 word = NVM_FUTURE_INIT_WORD1;
3758                 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3759                 break;
3760         }
3761
3762         ret_val = hw->nvm.ops.read(hw, word, 1, &data);
3763         if (ret_val)
3764                 return ret_val;
3765
3766         if (!(data & valid_csum_mask)) {
3767                 data |= valid_csum_mask;
3768                 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
3769                 if (ret_val)
3770                         return ret_val;
3771                 ret_val = hw->nvm.ops.update(hw);
3772                 if (ret_val)
3773                         return ret_val;
3774         }
3775
3776         return e1000_validate_nvm_checksum_generic(hw);
3777 }
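
/* For reference, the generic validation called above (in the shared base
 * code) covers the first 0x40 NVM words: it sums words 0x00 through
 * NVM_CHECKSUM_REG and expects the 16-bit total to equal NVM_SUM.  A minimal
 * sketch of a caller driving the check through the ops table:
 *
 *      ret_val = hw->nvm.ops.validate(hw);
 *      if (ret_val)
 *              DEBUGOUT("NVM checksum invalid\n");
 */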
3778
3779 /**
3780  *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
3781  *  @hw: pointer to the HW structure
3782  *  @offset: The offset (in bytes) of the byte/word to write.
3783  *  @size: Size of data to write, 1=byte 2=word
3784  *  @data: The byte(s) to write to the NVM.
3785  *
3786  *  Writes one/two bytes to the NVM using the flash access registers.
3787  **/
3788 STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3789                                           u8 size, u16 data)
3790 {
3791         union ich8_hws_flash_status hsfsts;
3792         union ich8_hws_flash_ctrl hsflctl;
3793         u32 flash_linear_addr;
3794         u32 flash_data = 0;
3795         s32 ret_val;
3796         u8 count = 0;
3797
3798         DEBUGFUNC("e1000_write_ich8_data");
3799
3800         if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3801                 return -E1000_ERR_NVM;
3802
3803         flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3804                              hw->nvm.flash_base_addr);
3805
3806         do {
3807                 usec_delay(1);
3808                 /* Steps */
3809                 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3810                 if (ret_val != E1000_SUCCESS)
3811                         break;
3812                 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3813
3814                 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3815                 hsflctl.hsf_ctrl.fldbcount = size - 1;
3816                 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
3817                 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3818
3819                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3820
3821                 if (size == 1)
3822                         flash_data = (u32)data & 0x00FF;
3823                 else
3824                         flash_data = (u32)data;
3825
3826                 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
3827
3828                 /* If FCERR is set to 1, clear it and try the whole
3829                  * sequence a few more times; otherwise we are done.
3830                  */
3831                 ret_val =
3832                     e1000_flash_cycle_ich8lan(hw,
3833                                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3834                 if (ret_val == E1000_SUCCESS)
3835                         break;
3836
3837                 /* If we're here, then things are most likely
3838                  * completely hosed, but if the error condition
3839                  * is detected, it won't hurt to give it another
3840                  * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
3841                  */
3842                 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3843                 if (hsfsts.hsf_status.flcerr)
3844                         /* Repeat for some time before giving up. */
3845                         continue;
3846                 if (!hsfsts.hsf_status.flcdone) {
3847                         DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3848                         break;
3849                 }
3850         } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3851
3852         return ret_val;
3853 }
3854
3855 /**
3856  *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
3857  *  @hw: pointer to the HW structure
3858  *  @offset: The offset of the byte to write.
3859  *  @data: The byte to write to the NVM.
3860  *
3861  *  Writes a single byte to the NVM using the flash access registers.
3862  **/
3863 STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3864                                           u8 data)
3865 {
3866         u16 word = (u16)data;
3867
3868         DEBUGFUNC("e1000_write_flash_byte_ich8lan");
3869
3870         return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
3871 }
3872
3873 /**
3874  *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
3875  *  @hw: pointer to the HW structure
3876  *  @offset: The offset of the byte to write.
3877  *  @byte: The byte to write to the NVM.
3878  *
3879  *  Writes a single byte to the NVM using the flash access registers.
3880  *  Goes through a retry algorithm before giving up.
3881  **/
3882 STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
3883                                                 u32 offset, u8 byte)
3884 {
3885         s32 ret_val;
3886         u16 program_retries;
3887
3888         DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
3889
3890         ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3891         if (!ret_val)
3892                 return ret_val;
3893
3894         for (program_retries = 0; program_retries < 100; program_retries++) {
3895                 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
3896                 usec_delay(100);
3897                 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
3898                 if (ret_val == E1000_SUCCESS)
3899                         break;
3900         }
3901         if (program_retries == 100)
3902                 return -E1000_ERR_NVM;
3903
3904         return E1000_SUCCESS;
3905 }
3906
3907 /**
3908  *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
3909  *  @hw: pointer to the HW structure
3910  *  @bank: 0 for first bank, 1 for second bank, etc.
3911  *
3912  *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
3913  *  bank N is 4096 * N + flash_reg_addr.
3914  **/
3915 STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3916 {
3917         struct e1000_nvm_info *nvm = &hw->nvm;
3918         union ich8_hws_flash_status hsfsts;
3919         union ich8_hws_flash_ctrl hsflctl;
3920         u32 flash_linear_addr;
3921         /* bank size is in 16bit words - adjust to bytes */
3922         u32 flash_bank_size = nvm->flash_bank_size * 2;
3923         s32 ret_val;
3924         s32 count = 0;
3925         s32 j, iteration, sector_size;
3926
3927         DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
3928
3929         hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3930
3931         /* Determine HW Sector size: Read BERASE bits of hw flash status
3932          * register
3933          * 00: The Hw sector is 256 bytes, hence we need to erase 16
3934          *     consecutive sectors.  The start index for the nth Hw sector
3935          *     can be calculated as = bank * 4096 + n * 256
3936          * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
3937          *     The start index for the nth Hw sector can be calculated
3938          *     as = bank * 4096
3939          * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
3940          *     (ich9 only, otherwise error condition)
3941          * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
3942          */
3943         switch (hsfsts.hsf_status.berasesz) {
3944         case 0:
3945                 /* Hw sector size 256 */
3946                 sector_size = ICH_FLASH_SEG_SIZE_256;
3947                 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
3948                 break;
3949         case 1:
3950                 sector_size = ICH_FLASH_SEG_SIZE_4K;
3951                 iteration = 1;
3952                 break;
3953         case 2:
3954                 sector_size = ICH_FLASH_SEG_SIZE_8K;
3955                 iteration = 1;
3956                 break;
3957         case 3:
3958                 sector_size = ICH_FLASH_SEG_SIZE_64K;
3959                 iteration = 1;
3960                 break;
3961         default:
3962                 return -E1000_ERR_NVM;
3963         }
3964
3965         /* Start with the base address, then add the sector offset. */
3966         flash_linear_addr = hw->nvm.flash_base_addr;
3967         flash_linear_addr += (bank) ? flash_bank_size : 0;
3968
3969         for (j = 0; j < iteration; j++) {
3970                 do {
3971                         u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
3972
3973                         /* Steps */
3974                         ret_val = e1000_flash_cycle_init_ich8lan(hw);
3975                         if (ret_val)
3976                                 return ret_val;
3977
3978                         /* Write a value 11 (block Erase) in Flash
3979                          * Cycle field in hw flash control
3980                          */
3981                         hsflctl.regval =
3982                             E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3983
3984                         hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3985                         E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
3986                                                 hsflctl.regval);
3987
3988                         /* Write the last 24 bits of an index within the
3989                          * block into Flash Linear address field in Flash
3990                          * Address.
3991                          */
3992                         flash_linear_addr += (j * sector_size);
3993                         E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
3994                                               flash_linear_addr);
3995
3996                         ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
3997                         if (ret_val == E1000_SUCCESS)
3998                                 break;
3999
4000                         /* Check if FCERR is set to 1.  If 1,
4001                          * clear it and try the whole sequence
4002                          * a few more times else Done
4003                          */
4004                         hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4005                                                       ICH_FLASH_HSFSTS);
4006                         if (hsfsts.hsf_status.flcerr)
4007                                 /* repeat for some time before giving up */
4008                                 continue;
4009                         else if (!hsfsts.hsf_status.flcdone)
4010                                 return ret_val;
4011                 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4012         }
4013
4014         return E1000_SUCCESS;
4015 }
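/* Illustrative note (editorial, not part of the Intel shared code): the
 * iteration count above follows directly from the erase sector size.  With
 * 256-byte sectors (berasesz == 00) one block-erase cycle is issued per
 * sector, e.g. for a hypothetical 4 KB bank:
 *
 *      iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256
 *                = 4096 / 256 = 16 erase cycles
 *
 * whereas the 4K/8K/64K sector encodings erase the selected bank in a
 * single cycle (iteration = 1).
 */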
4016
4017 /**
4018  *  e1000_valid_led_default_ich8lan - Set the default LED settings
4019  *  @hw: pointer to the HW structure
4020  *  @data: Pointer to the LED settings
4021  *
4022  *  Reads the LED default settings from the NVM into data.  If the NVM LED
4023  *  setting is all 0's or all F's, set the LED default to a valid LED default
4024  *  setting.
4025  **/
4026 STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4027 {
4028         s32 ret_val;
4029
4030         DEBUGFUNC("e1000_valid_led_default_ich8lan");
4031
4032         ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4033         if (ret_val) {
4034                 DEBUGOUT("NVM Read Error\n");
4035                 return ret_val;
4036         }
4037
4038         if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4039                 *data = ID_LED_DEFAULT_ICH8LAN;
4040
4041         return E1000_SUCCESS;
4042 }
4043
4044 /**
4045  *  e1000_id_led_init_pchlan - store LED configurations
4046  *  @hw: pointer to the HW structure
4047  *
4048  *  PCH does not control LEDs via the LEDCTL register; rather, it uses
4049  *  the PHY LED configuration register.
4050  *
4051  *  PCH also does not have an "always on" or "always off" mode which
4052  *  complicates the ID feature.  Instead of using the "on" mode to indicate
4053  *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4054  *  use "link_up" mode.  The LEDs will still ID on request if there is no
4055  *  link based on logic in e1000_led_[on|off]_pchlan().
4056  **/
4057 STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4058 {
4059         struct e1000_mac_info *mac = &hw->mac;
4060         s32 ret_val;
4061         const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4062         const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4063         u16 data, i, temp, shift;
4064
4065         DEBUGFUNC("e1000_id_led_init_pchlan");
4066
4067         /* Get default ID LED modes */
4068         ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4069         if (ret_val)
4070                 return ret_val;
4071
4072         mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4073         mac->ledctl_mode1 = mac->ledctl_default;
4074         mac->ledctl_mode2 = mac->ledctl_default;
4075
4076         for (i = 0; i < 4; i++) {
4077                 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4078                 shift = (i * 5);
4079                 switch (temp) {
4080                 case ID_LED_ON1_DEF2:
4081                 case ID_LED_ON1_ON2:
4082                 case ID_LED_ON1_OFF2:
4083                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4084                         mac->ledctl_mode1 |= (ledctl_on << shift);
4085                         break;
4086                 case ID_LED_OFF1_DEF2:
4087                 case ID_LED_OFF1_ON2:
4088                 case ID_LED_OFF1_OFF2:
4089                         mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4090                         mac->ledctl_mode1 |= (ledctl_off << shift);
4091                         break;
4092                 default:
4093                         /* Do nothing */
4094                         break;
4095                 }
4096                 switch (temp) {
4097                 case ID_LED_DEF1_ON2:
4098                 case ID_LED_ON1_ON2:
4099                 case ID_LED_OFF1_ON2:
4100                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4101                         mac->ledctl_mode2 |= (ledctl_on << shift);
4102                         break;
4103                 case ID_LED_DEF1_OFF2:
4104                 case ID_LED_ON1_OFF2:
4105                 case ID_LED_OFF1_OFF2:
4106                         mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4107                         mac->ledctl_mode2 |= (ledctl_off << shift);
4108                         break;
4109                 default:
4110                         /* Do nothing */
4111                         break;
4112                 }
4113         }
4114
4115         return E1000_SUCCESS;
4116 }
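/* Illustrative note (editorial sketch, not part of the Intel shared code):
 * each 4-bit ID-LED nibble read from the NVM selects a mode for one LED,
 * and each LED field in the PHY LED configuration register is 5 bits wide.
 * For example, for i = 1 the loop above effectively does:
 *
 *      temp  = (data >> 4) & E1000_LEDCTL_LED0_MODE_MASK;   <- NVM bits 7:4
 *      shift = 5;                                 <- LED1 field, bits 9:5
 *      mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << 5);
 *      mac->ledctl_mode1 |= (ledctl_on << 5);
 *
 * The resulting ledctl_mode1/ledctl_mode2 values are later written to
 * HV_LED_CONFIG by e1000_setup_led_pchlan() and e1000_led_[on|off]_pchlan().
 */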
4117
4118 /**
4119  *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4120  *  @hw: pointer to the HW structure
4121  *
4122  *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4123  *  register, so the bus width is hard coded.
4124  **/
4125 STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4126 {
4127         struct e1000_bus_info *bus = &hw->bus;
4128         s32 ret_val;
4129
4130         DEBUGFUNC("e1000_get_bus_info_ich8lan");
4131
4132         ret_val = e1000_get_bus_info_pcie_generic(hw);
4133
4134         /* ICH devices are "PCI Express"-ish.  They have
4135          * a configuration space, but do not contain
4136          * PCI Express Capability registers, so bus width
4137          * must be hardcoded.
4138          */
4139         if (bus->width == e1000_bus_width_unknown)
4140                 bus->width = e1000_bus_width_pcie_x1;
4141
4142         return ret_val;
4143 }
4144
4145 /**
4146  *  e1000_reset_hw_ich8lan - Reset the hardware
4147  *  @hw: pointer to the HW structure
4148  *
4149  *  Does a full reset of the hardware which includes a reset of the PHY and
4150  *  MAC.
4151  **/
4152 STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4153 {
4154         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4155         u16 kum_cfg;
4156         u32 ctrl, reg;
4157         s32 ret_val;
4158
4159         DEBUGFUNC("e1000_reset_hw_ich8lan");
4160
4161         /* Prevent the PCI-E bus from sticking if there is no TLP connection
4162          * on the last TLP read/write transaction when the MAC is reset.
4163          */
4164         ret_val = e1000_disable_pcie_master_generic(hw);
4165         if (ret_val)
4166                 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4167
4168         DEBUGOUT("Masking off all interrupts\n");
4169         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4170
4171         /* Disable the Transmit and Receive units.  Then delay to allow
4172          * any pending transactions to complete before we hit the MAC
4173          * with the global reset.
4174          */
4175         E1000_WRITE_REG(hw, E1000_RCTL, 0);
4176         E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4177         E1000_WRITE_FLUSH(hw);
4178
4179         msec_delay(10);
4180
4181         /* Workaround for ICH8 bit corruption issue in FIFO memory */
4182         if (hw->mac.type == e1000_ich8lan) {
4183                 /* Set Tx and Rx buffer allocation to 8k apiece. */
4184                 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4185                 /* Set Packet Buffer Size to 16k. */
4186                 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4187         }
4188
4189         if (hw->mac.type == e1000_pchlan) {
4190                 /* Save the NVM K1 bit setting */
4191                 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4192                 if (ret_val)
4193                         return ret_val;
4194
4195                 if (kum_cfg & E1000_NVM_K1_ENABLE)
4196                         dev_spec->nvm_k1_enabled = true;
4197                 else
4198                         dev_spec->nvm_k1_enabled = false;
4199         }
4200
4201         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4202
4203         if (!hw->phy.ops.check_reset_block(hw)) {
4204                 /* Full-chip reset requires MAC and PHY reset at the same
4205                  * time to make sure the interface between MAC and the
4206                  * external PHY is reset.
4207                  */
4208                 ctrl |= E1000_CTRL_PHY_RST;
4209
4210                 /* Gate automatic PHY configuration by hardware on
4211                  * non-managed 82579
4212                  */
4213                 if ((hw->mac.type == e1000_pch2lan) &&
4214                     !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4215                         e1000_gate_hw_phy_config_ich8lan(hw, true);
4216         }
4217         ret_val = e1000_acquire_swflag_ich8lan(hw);
4218         DEBUGOUT("Issuing a global reset to ich8lan\n");
4219         E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4220         /* cannot issue a flush here because it hangs the hardware */
4221         msec_delay(20);
4222
4223         /* Set Phy Config Counter to 50msec */
4224         if (hw->mac.type == e1000_pch2lan) {
4225                 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4226                 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4227                 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4228                 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4229         }
4230
4231         if (!ret_val)
4232                 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
4233
4234         if (ctrl & E1000_CTRL_PHY_RST) {
4235                 ret_val = hw->phy.ops.get_cfg_done(hw);
4236                 if (ret_val)
4237                         return ret_val;
4238
4239                 ret_val = e1000_post_phy_reset_ich8lan(hw);
4240                 if (ret_val)
4241                         return ret_val;
4242         }
4243
4244         /* For PCH, this write will make sure that any noise
4245          * will be detected as a CRC error and be dropped rather than show up
4246          * as a bad packet to the DMA engine.
4247          */
4248         if (hw->mac.type == e1000_pchlan)
4249                 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
4250
4251         E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4252         E1000_READ_REG(hw, E1000_ICR);
4253
4254         reg = E1000_READ_REG(hw, E1000_KABGTXD);
4255         reg |= E1000_KABGTXD_BGSQLBIAS;
4256         E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
4257
4258         return E1000_SUCCESS;
4259 }
4260
4261 /**
4262  *  e1000_init_hw_ich8lan - Initialize the hardware
4263  *  @hw: pointer to the HW structure
4264  *
4265  *  Prepares the hardware for transmit and receive by doing the following:
4266  *   - initialize hardware bits
4267  *   - initialize LED identification
4268  *   - setup receive address registers
4269  *   - setup flow control
4270  *   - setup transmit descriptors
4271  *   - clear statistics
4272  **/
4273 STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4274 {
4275         struct e1000_mac_info *mac = &hw->mac;
4276         u32 ctrl_ext, txdctl, snoop;
4277         s32 ret_val;
4278         u16 i;
4279
4280         DEBUGFUNC("e1000_init_hw_ich8lan");
4281
4282         e1000_initialize_hw_bits_ich8lan(hw);
4283
4284         /* Initialize identification LED */
4285         ret_val = mac->ops.id_led_init(hw);
4286         /* An error is not fatal and we should not stop init due to this */
4287         if (ret_val)
4288                 DEBUGOUT("Error initializing identification LED\n");
4289
4290         /* Setup the receive address. */
4291         e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
4292
4293         /* Zero out the Multicast HASH table */
4294         DEBUGOUT("Zeroing the MTA\n");
4295         for (i = 0; i < mac->mta_reg_count; i++)
4296                 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4297
4298         /* The 82578 Rx buffer will stall if wakeup is enabled in both the host
4299          * and the ME.  Disable wakeup by clearing the host wakeup bit.
4300          * Reset the PHY after disabling host wakeup to reset the Rx buffer.
4301          */
4302         if (hw->phy.type == e1000_phy_82578) {
4303                 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
4304                 i &= ~BM_WUC_HOST_WU_BIT;
4305                 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
4306                 ret_val = e1000_phy_hw_reset_ich8lan(hw);
4307                 if (ret_val)
4308                         return ret_val;
4309         }
4310
4311         /* Setup link and flow control */
4312         ret_val = mac->ops.setup_link(hw);
4313
4314         /* Set the transmit descriptor write-back policy for both queues */
4315         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
4316         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4317                   E1000_TXDCTL_FULL_TX_DESC_WB);
4318         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4319                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4320         E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
4321         txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
4322         txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4323                   E1000_TXDCTL_FULL_TX_DESC_WB);
4324         txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4325                   E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4326         E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
4327
4328         /* ICH8 has opposite polarity of no_snoop bits.
4329          * By default, we should use snoop behavior.
4330          */
4331         if (mac->type == e1000_ich8lan)
4332                 snoop = PCIE_ICH8_SNOOP_ALL;
4333         else
4334                 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
4335         e1000_set_pcie_no_snoop_generic(hw, snoop);
4336
4337         ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
4338         ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4339         E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
4340
4341         /* Clear all of the statistics registers (clear on read).  It is
4342          * important that we do this after we have tried to establish link
4343          * because the symbol error count will increment wildly if there
4344          * is no link.
4345          */
4346         e1000_clear_hw_cntrs_ich8lan(hw);
4347
4348         return ret_val;
4349 }
4350
4351 /**
4352  *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4353  *  @hw: pointer to the HW structure
4354  *
4355  *  Sets/clears the hardware bits necessary for correctly setting up the
4356  *  hardware for transmit and receive.
4357  **/
4358 STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4359 {
4360         u32 reg;
4361
4362         DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
4363
4364         /* Extended Device Control */
4365         reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
4366         reg |= (1 << 22);
4367         /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4368         if (hw->mac.type >= e1000_pchlan)
4369                 reg |= E1000_CTRL_EXT_PHYPDEN;
4370         E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
4371
4372         /* Transmit Descriptor Control 0 */
4373         reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
4374         reg |= (1 << 22);
4375         E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
4376
4377         /* Transmit Descriptor Control 1 */
4378         reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
4379         reg |= (1 << 22);
4380         E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
4381
4382         /* Transmit Arbitration Control 0 */
4383         reg = E1000_READ_REG(hw, E1000_TARC(0));
4384         if (hw->mac.type == e1000_ich8lan)
4385                 reg |= (1 << 28) | (1 << 29);
4386         reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4387         E1000_WRITE_REG(hw, E1000_TARC(0), reg);
4388
4389         /* Transmit Arbitration Control 1 */
4390         reg = E1000_READ_REG(hw, E1000_TARC(1));
4391         if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
4392                 reg &= ~(1 << 28);
4393         else
4394                 reg |= (1 << 28);
4395         reg |= (1 << 24) | (1 << 26) | (1 << 30);
4396         E1000_WRITE_REG(hw, E1000_TARC(1), reg);
4397
4398         /* Device Status */
4399         if (hw->mac.type == e1000_ich8lan) {
4400                 reg = E1000_READ_REG(hw, E1000_STATUS);
4401                 reg &= ~(1 << 31);
4402                 E1000_WRITE_REG(hw, E1000_STATUS, reg);
4403         }
4404
4405         /* Work around a descriptor data corruption issue seen during NFSv2 UDP
4406          * traffic by disabling the NFS filtering capability.
4407          */
4408         reg = E1000_READ_REG(hw, E1000_RFCTL);
4409         reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4410
4411         /* Disable IPv6 extension header parsing because some malformed
4412          * IPv6 headers can hang the Rx.
4413          */
4414         if (hw->mac.type == e1000_ich8lan)
4415                 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4416         E1000_WRITE_REG(hw, E1000_RFCTL, reg);
4417
4418         /* Enable ECC on Lynxpoint */
4419         if (hw->mac.type == e1000_pch_lpt) {
4420                 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
4421                 reg |= E1000_PBECCSTS_ECC_ENABLE;
4422                 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
4423
4424                 reg = E1000_READ_REG(hw, E1000_CTRL);
4425                 reg |= E1000_CTRL_MEHE;
4426                 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4427         }
4428
4429         return;
4430 }
4431
4432 /**
4433  *  e1000_setup_link_ich8lan - Setup flow control and link settings
4434  *  @hw: pointer to the HW structure
4435  *
4436  *  Determines which flow control settings to use, then configures flow
4437  *  control.  Calls the appropriate media-specific link configuration
4438  *  function.  Assuming the adapter has a valid link partner, a valid link
4439  *  should be established.  Assumes the hardware has previously been reset
4440  *  and the transmitter and receiver are not enabled.
4441  **/
4442 STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4443 {
4444         s32 ret_val;
4445
4446         DEBUGFUNC("e1000_setup_link_ich8lan");
4447
4448         if (hw->phy.ops.check_reset_block(hw))
4449                 return E1000_SUCCESS;
4450
4451         /* ICH parts do not have a word in the NVM to determine
4452          * the default flow control setting, so we explicitly
4453          * set it to full.
4454          */
4455         if (hw->fc.requested_mode == e1000_fc_default)
4456                 hw->fc.requested_mode = e1000_fc_full;
4457
4458         /* Save off the requested flow control mode for use later.  Depending
4459          * on the link partner's capabilities, we may or may not use this mode.
4460          */
4461         hw->fc.current_mode = hw->fc.requested_mode;
4462
4463         DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
4464                 hw->fc.current_mode);
4465
4466         /* Continue to configure the copper link. */
4467         ret_val = hw->mac.ops.setup_physical_interface(hw);
4468         if (ret_val)
4469                 return ret_val;
4470
4471         E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
4472         if ((hw->phy.type == e1000_phy_82578) ||
4473             (hw->phy.type == e1000_phy_82579) ||
4474             (hw->phy.type == e1000_phy_i217) ||
4475             (hw->phy.type == e1000_phy_82577)) {
4476                 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
4477
4478                 ret_val = hw->phy.ops.write_reg(hw,
4479                                              PHY_REG(BM_PORT_CTRL_PAGE, 27),
4480                                              hw->fc.pause_time);
4481                 if (ret_val)
4482                         return ret_val;
4483         }
4484
4485         return e1000_set_fc_watermarks_generic(hw);
4486 }
4487
4488 /**
4489  *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4490  *  @hw: pointer to the HW structure
4491  *
4492  *  Configures the Kumeran interface to the PHY to wait the appropriate time
4493  *  when polling the PHY, then calls the generic setup_copper_link to finish
4494  *  configuring the copper link.
4495  **/
4496 STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4497 {
4498         u32 ctrl;
4499         s32 ret_val;
4500         u16 reg_data;
4501
4502         DEBUGFUNC("e1000_setup_copper_link_ich8lan");
4503
4504         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4505         ctrl |= E1000_CTRL_SLU;
4506         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4507         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4508
4509         /* Set the mac to wait the maximum time between each iteration
4510          * and increase the max iterations when polling the phy;
4511          * this fixes erroneous timeouts at 10Mbps.
4512          */
4513         ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
4514                                                0xFFFF);
4515         if (ret_val)
4516                 return ret_val;
4517         ret_val = e1000_read_kmrn_reg_generic(hw,
4518                                               E1000_KMRNCTRLSTA_INBAND_PARAM,
4519                                               &reg_data);
4520         if (ret_val)
4521                 return ret_val;
4522         reg_data |= 0x3F;
4523         ret_val = e1000_write_kmrn_reg_generic(hw,
4524                                                E1000_KMRNCTRLSTA_INBAND_PARAM,
4525                                                reg_data);
4526         if (ret_val)
4527                 return ret_val;
4528
4529         switch (hw->phy.type) {
4530         case e1000_phy_igp_3:
4531                 ret_val = e1000_copper_link_setup_igp(hw);
4532                 if (ret_val)
4533                         return ret_val;
4534                 break;
4535         case e1000_phy_bm:
4536         case e1000_phy_82578:
4537                 ret_val = e1000_copper_link_setup_m88(hw);
4538                 if (ret_val)
4539                         return ret_val;
4540                 break;
4541         case e1000_phy_82577:
4542         case e1000_phy_82579:
4543                 ret_val = e1000_copper_link_setup_82577(hw);
4544                 if (ret_val)
4545                         return ret_val;
4546                 break;
4547         case e1000_phy_ife:
4548                 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
4549                                                &reg_data);
4550                 if (ret_val)
4551                         return ret_val;
4552
4553                 reg_data &= ~IFE_PMC_AUTO_MDIX;
4554
4555                 switch (hw->phy.mdix) {
4556                 case 1:
4557                         reg_data &= ~IFE_PMC_FORCE_MDIX;
4558                         break;
4559                 case 2:
4560                         reg_data |= IFE_PMC_FORCE_MDIX;
4561                         break;
4562                 case 0:
4563                 default:
4564                         reg_data |= IFE_PMC_AUTO_MDIX;
4565                         break;
4566                 }
4567                 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
4568                                                 reg_data);
4569                 if (ret_val)
4570                         return ret_val;
4571                 break;
4572         default:
4573                 break;
4574         }
4575
4576         return e1000_setup_copper_link_generic(hw);
4577 }
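/* Illustrative note (editorial, derived from the IFE case above): the
 * hw->phy.mdix setting maps onto the IFE MDI/MDI-X control bits as follows:
 *
 *      hw->phy.mdix == 1  ->  IFE_PMC_FORCE_MDIX cleared (force MDI)
 *      hw->phy.mdix == 2  ->  IFE_PMC_FORCE_MDIX set     (force MDI-X)
 *      otherwise          ->  IFE_PMC_AUTO_MDIX set      (auto crossover)
 *
 * IFE_PMC_AUTO_MDIX is cleared first in every case so that a forced mode
 * takes effect.
 */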
4578
4579 /**
4580  *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4581  *  @hw: pointer to the HW structure
4582  *
4583  *  Calls the PHY-specific link setup function and then calls the
4584  *  generic setup_copper_link to finish configuring the link for
4585  *  Lynxpoint PCH devices.
4586  **/
4587 STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4588 {
4589         u32 ctrl;
4590         s32 ret_val;
4591
4592         DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
4593
4594         ctrl = E1000_READ_REG(hw, E1000_CTRL);
4595         ctrl |= E1000_CTRL_SLU;
4596         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4597         E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4598
4599         ret_val = e1000_copper_link_setup_82577(hw);
4600         if (ret_val)
4601                 return ret_val;
4602
4603         return e1000_setup_copper_link_generic(hw);
4604 }
4605
4606 /**
4607  *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4608  *  @hw: pointer to the HW structure
4609  *  @speed: pointer to store current link speed
4610  *  @duplex: pointer to store the current link duplex
4611  *
4612  *  Calls the generic get_speed_and_duplex to retrieve the current link
4613  *  information and then calls the Kumeran lock loss workaround for links at
4614  *  gigabit speeds.
4615  **/
4616 STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4617                                           u16 *duplex)
4618 {
4619         s32 ret_val;
4620
4621         DEBUGFUNC("e1000_get_link_up_info_ich8lan");
4622
4623         ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
4624         if (ret_val)
4625                 return ret_val;
4626
4627         if ((hw->mac.type == e1000_ich8lan) &&
4628             (hw->phy.type == e1000_phy_igp_3) &&
4629             (*speed == SPEED_1000)) {
4630                 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4631         }
4632
4633         return ret_val;
4634 }
4635
4636 /**
4637  *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4638  *  @hw: pointer to the HW structure
4639  *
4640  *  Work-around for 82566 Kumeran PCS lock loss:
4641  *  On link status change (i.e. PCI reset, speed change) when link is up and
4642  *  speed is gigabit:
4643  *    0) if workaround is optionally disabled do nothing
4644  *    1) wait 1ms for Kumeran link to come up
4645  *    2) check Kumeran Diagnostic register PCS lock loss bit
4646  *    3) if not set the link is locked (all is good), otherwise...
4647  *    4) reset the PHY
4648  *    5) repeat up to 10 times
4649  *  Note: this is only called for IGP3 copper when speed is 1gb.
4650  **/
4651 STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
4652 {
4653         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4654         u32 phy_ctrl;
4655         s32 ret_val;
4656         u16 i, data;
4657         bool link;
4658
4659         DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
4660
4661         if (!dev_spec->kmrn_lock_loss_workaround_enabled)
4662                 return E1000_SUCCESS;
4663
4664         /* Make sure link is up before proceeding.  If not, just return.
4665          * Attempting this while link is negotiating has been seen to foul
4666          * up link stability.
4667          */
4668         ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
4669         if (!link)
4670                 return E1000_SUCCESS;
4671
4672         for (i = 0; i < 10; i++) {
4673                 /* read once to clear */
4674                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4675                 if (ret_val)
4676                         return ret_val;
4677                 /* and again to get new status */
4678                 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
4679                 if (ret_val)
4680                         return ret_val;
4681
4682                 /* check for PCS lock */
4683                 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
4684                         return E1000_SUCCESS;
4685
4686                 /* Issue PHY reset */
4687                 hw->phy.ops.reset(hw);
4688                 msec_delay_irq(5);
4689         }
4690         /* Disable GigE link negotiation */
4691         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4692         phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
4693                      E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4694         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4695
4696         /* Call gig speed drop workaround on Gig disable before accessing
4697          * any PHY registers
4698          */
4699         e1000_gig_downshift_workaround_ich8lan(hw);
4700
4701         /* unable to acquire PCS lock */
4702         return -E1000_ERR_PHY;
4703 }
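/* Illustrative note (editorial): the loop above allows up to 10 PHY resets
 * with a 5 ms pause after each (msec_delay_irq(5)), i.e. roughly 50 ms of
 * retry time, before gigabit is disabled and -E1000_ERR_PHY is returned.
 */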
4704
4705 /**
4706  *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4707  *  @hw: pointer to the HW structure
4708  *  @state: boolean value used to set the current Kumeran workaround state
4709  *
4710  *  If ICH8, set the current Kumeran workaround state (enabled = true,
4711  *  disabled = false).
4712  **/
4713 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4714                                                  bool state)
4715 {
4716         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4717
4718         DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
4719
4720         if (hw->mac.type != e1000_ich8lan) {
4721                 DEBUGOUT("Workaround applies to ICH8 only.\n");
4722                 return;
4723         }
4724
4725         dev_spec->kmrn_lock_loss_workaround_enabled = state;
4726
4727         return;
4728 }
4729
4730 /**
4731  *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
4732  *  @hw: pointer to the HW structure
4733  *
4734  *  Workaround for 82566 power-down on D3 entry:
4735  *    1) disable gigabit link
4736  *    2) write VR power-down enable
4737  *    3) read it back
4738  *  Continue if successful, else issue LCD reset and repeat
4739  **/
4740 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
4741 {
4742         u32 reg;
4743         u16 data;
4744         u8  retry = 0;
4745
4746         DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
4747
4748         if (hw->phy.type != e1000_phy_igp_3)
4749                 return;
4750
4751         /* Try the workaround twice (if needed) */
4752         do {
4753                 /* Disable link */
4754                 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
4755                 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
4756                         E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
4757                 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
4758
4759                 /* Call gig speed drop workaround on Gig disable before
4760                  * accessing any PHY registers
4761                  */
4762                 if (hw->mac.type == e1000_ich8lan)
4763                         e1000_gig_downshift_workaround_ich8lan(hw);
4764
4765                 /* Write VR power-down enable */
4766                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4767                 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4768                 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
4769                                       data | IGP3_VR_CTRL_MODE_SHUTDOWN);
4770
4771                 /* Read it back and test */
4772                 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
4773                 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
4774                 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
4775                         break;
4776
4777                 /* Issue PHY reset and repeat at most one more time */
4778                 reg = E1000_READ_REG(hw, E1000_CTRL);
4779                 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
4780                 retry++;
4781         } while (retry);
4782 }
4783
4784 /**
4785  *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
4786  *  @hw: pointer to the HW structure
4787  *
4788  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
4789  *  LPLU, Gig disable, MDIC PHY reset):
4790  *    1) Set Kumeran Near-end loopback
4791  *    2) Clear Kumeran Near-end loopback
4792  *  Should only be called for ICH8[m] devices with any 1G Phy.
4793  **/
4794 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
4795 {
4796         s32 ret_val;
4797         u16 reg_data;
4798
4799         DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
4800
4801         if ((hw->mac.type != e1000_ich8lan) ||
4802             (hw->phy.type == e1000_phy_ife))
4803                 return;
4804
4805         ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4806                                               &reg_data);
4807         if (ret_val)
4808                 return;
4809         reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
4810         ret_val = e1000_write_kmrn_reg_generic(hw,
4811                                                E1000_KMRNCTRLSTA_DIAG_OFFSET,
4812                                                reg_data);
4813         if (ret_val)
4814                 return;
4815         reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
4816         e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
4817                                      reg_data);
4818 }
4819
4820 /**
4821  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
4822  *  @hw: pointer to the HW structure
4823  *
4824  *  During S0 to Sx transition, it is possible the link remains at gig
4825  *  instead of negotiating to a lower speed.  Before going to Sx, set
4826  *  'Gig Disable' to force link speed negotiation to a lower speed based on
4827  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
4828  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
4829  *  needs to be written.
4830  *  Parts that support (and are linked to a partner which supports) EEE in
4831  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4832  *  than 10Mbps w/o EEE.
4833  **/
4834 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4835 {
4836         struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4837         u32 phy_ctrl;
4838         s32 ret_val;
4839
4840         DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
4841
4842         phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
4843         phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4844
4845         if (hw->phy.type == e1000_phy_i217) {
4846                 u16 phy_reg, device_id = hw->device_id;
4847
4848                 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4849                     (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
4850                     (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
4851                     (device_id == E1000_DEV_ID_PCH_I218_V3)) {
4852                         u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
4853
4854                         E1000_WRITE_REG(hw, E1000_FEXTNVM6,
4855                                         fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4856                 }
4857
4858                 ret_val = hw->phy.ops.acquire(hw);
4859                 if (ret_val)
4860                         goto out;
4861
4862                 if (!dev_spec->eee_disable) {
4863                         u16 eee_advert;
4864
4865                         ret_val =
4866                             e1000_read_emi_reg_locked(hw,
4867                                                       I217_EEE_ADVERTISEMENT,
4868                                                       &eee_advert);
4869                         if (ret_val)
4870                                 goto release;
4871
4872                         /* Disable LPLU if both link partners support 100BaseT
4873                          * EEE and 100Full is advertised on both ends of the
4874                          * link, and enable Auto Enable LPI since there will
4875                          * be no driver to enable LPI while in Sx.
4876                          */
4877                         if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
4878                             (dev_spec->eee_lp_ability &
4879                              I82579_EEE_100_SUPPORTED) &&
4880                             (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
4881                                 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4882                                               E1000_PHY_CTRL_NOND0A_LPLU);
4883
4884                                 /* Set Auto Enable LPI after link up */
4885                                 hw->phy.ops.read_reg_locked(hw,
4886                                                             I217_LPI_GPIO_CTRL,
4887                                                             &phy_reg);
4888                                 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4889                                 hw->phy.ops.write_reg_locked(hw,
4890                                                              I217_LPI_GPIO_CTRL,
4891                                                              phy_reg);
4892                         }
4893                 }
4894
4895                 /* For i217 Intel Rapid Start Technology support,
4896                  * when the system is going into Sx and no manageability engine
4897                  * is present, the driver must configure proxy to reset only on
4898                  * power good.  LPI (Low Power Idle) state must also reset only
4899                  * on power good, as well as the MTA (Multicast table array).
4900                  * The SMBus release must also be disabled on LCD reset.
4901                  */
4902                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
4903                       E1000_ICH_FWSM_FW_VALID)) {
4904                         /* Enable proxy to reset only on power good. */
4905                         hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
4906                                                     &phy_reg);
4907                         phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4908                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
4909                                                      phy_reg);
4910
4911                         /* Set the LPI (EEE) enable bit to reset only on
4912                          * power good.
4913                          */
4914                         hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
4915                         phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4916                         hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
4917
4918                         /* Disable the SMB release on LCD reset. */
4919                         hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
4920                         phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4921                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
4922                 }
4923
4924                 /* Enable MTA to reset for Intel Rapid Start Technology
4925                  * Support
4926                  */
4927                 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
4928                 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4929                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
4930
4931 release:
4932                 hw->phy.ops.release(hw);
4933         }
4934 out:
4935         E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
4936
4937         if (hw->mac.type == e1000_ich8lan)
4938                 e1000_gig_downshift_workaround_ich8lan(hw);
4939
4940         if (hw->mac.type >= e1000_pchlan) {
4941                 e1000_oem_bits_config_ich8lan(hw, false);
4942
4943                 /* Reset PHY to activate OEM bits on 82577/8 */
4944                 if (hw->mac.type == e1000_pchlan)
4945                         e1000_phy_hw_reset_generic(hw);
4946
4947                 ret_val = hw->phy.ops.acquire(hw);
4948                 if (ret_val)
4949                         return;
4950                 e1000_write_smbus_addr(hw);
4951                 hw->phy.ops.release(hw);
4952         }
4953
4954         return;
4955 }
4956
4957 /**
4958  *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
4959  *  @hw: pointer to the HW structure
4960  *
4961  *  During Sx to S0 transitions on non-managed devices or managed devices
4962  *  on which PHY resets are not blocked, if the PHY registers cannot be
4963  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
4964  *  the PHY.
4965  *  On i217, setup Intel Rapid Start Technology.
4966  **/
4967 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4968 {
4969         s32 ret_val;
4970
4971         DEBUGFUNC("e1000_resume_workarounds_pchlan");
4972         if (hw->mac.type < e1000_pch2lan)
4973                 return E1000_SUCCESS;
4974
4975         ret_val = e1000_init_phy_workarounds_pchlan(hw);
4976         if (ret_val) {
4977                 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
4978                 return ret_val;
4979         }
4980
4981         /* For i217 Intel Rapid Start Technology support, when the system
4982          * is transitioning from Sx and no manageability engine is present,
4983          * configure SMBus to restore on reset, disable proxy, and enable
4984          * the reset on MTA (Multicast table array).
4985          */
4986         if (hw->phy.type == e1000_phy_i217) {
4987                 u16 phy_reg;
4988
4989                 ret_val = hw->phy.ops.acquire(hw);
4990                 if (ret_val) {
4991                         DEBUGOUT("Failed to setup iRST\n");
4992                         return ret_val;
4993                 }
4994
4995                 /* Clear Auto Enable LPI after link up */
4996                 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
4997                 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
4998                 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
4999
5000                 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5001                     E1000_ICH_FWSM_FW_VALID)) {
5002                         /* Restore clear on SMB if no manageability engine
5003                          * is present
5004                          */
5005                         ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5006                                                               &phy_reg);
5007                         if (ret_val)
5008                                 goto release;
5009                         phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5010                         hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5011
5012                         /* Disable Proxy */
5013                         hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5014                 }
5015                 /* Enable reset on MTA */
5016                 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5017                                                       &phy_reg);
5018                 if (ret_val)
5019                         goto release;
5020                 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5021                 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5022 release:
5023                 if (ret_val)
5024                         DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5025                 hw->phy.ops.release(hw);
5026                 return ret_val;
5027         }
5028         return E1000_SUCCESS;
5029 }
5030
5031 /**
5032  *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5033  *  @hw: pointer to the HW structure
5034  *
5035  *  Return the LED back to the default configuration.
5036  **/
5037 STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5038 {
5039         DEBUGFUNC("e1000_cleanup_led_ich8lan");
5040
5041         if (hw->phy.type == e1000_phy_ife)
5042                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5043                                              0);
5044
5045         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5046         return E1000_SUCCESS;
5047 }
5048
5049 /**
5050  *  e1000_led_on_ich8lan - Turn LEDs on
5051  *  @hw: pointer to the HW structure
5052  *
5053  *  Turn on the LEDs.
5054  **/
5055 STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5056 {
5057         DEBUGFUNC("e1000_led_on_ich8lan");
5058
5059         if (hw->phy.type == e1000_phy_ife)
5060                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5061                                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5062
5063         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5064         return E1000_SUCCESS;
5065 }
5066
5067 /**
5068  *  e1000_led_off_ich8lan - Turn LEDs off
5069  *  @hw: pointer to the HW structure
5070  *
5071  *  Turn off the LEDs.
5072  **/
5073 STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5074 {
5075         DEBUGFUNC("e1000_led_off_ich8lan");
5076
5077         if (hw->phy.type == e1000_phy_ife)
5078                 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5079                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5080
5081         E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5082         return E1000_SUCCESS;
5083 }
5084
5085 /**
5086  *  e1000_setup_led_pchlan - Configures SW controllable LED
5087  *  @hw: pointer to the HW structure
5088  *
5089  *  This prepares the SW controllable LED for use.
5090  **/
5091 STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5092 {
5093         DEBUGFUNC("e1000_setup_led_pchlan");
5094
5095         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5096                                      (u16)hw->mac.ledctl_mode1);
5097 }
5098
5099 /**
5100  *  e1000_cleanup_led_pchlan - Restore the default LED operation
5101  *  @hw: pointer to the HW structure
5102  *
5103  *  Return the LED back to the default configuration.
5104  **/
5105 STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5106 {
5107         DEBUGFUNC("e1000_cleanup_led_pchlan");
5108
5109         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5110                                      (u16)hw->mac.ledctl_default);
5111 }
5112
5113 /**
5114  *  e1000_led_on_pchlan - Turn LEDs on
5115  *  @hw: pointer to the HW structure
5116  *
5117  *  Turn on the LEDs.
5118  **/
5119 STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5120 {
5121         u16 data = (u16)hw->mac.ledctl_mode2;
5122         u32 i, led;
5123
5124         DEBUGFUNC("e1000_led_on_pchlan");
5125
5126         /* If no link, then turn LED on by setting the invert bit
5127          * for each LED whose mode is "link_up" in ledctl_mode2.
5128          */
5129         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5130                 for (i = 0; i < 3; i++) {
5131                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5132                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5133                             E1000_LEDCTL_MODE_LINK_UP)
5134                                 continue;
5135                         if (led & E1000_PHY_LED0_IVRT)
5136                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5137                         else
5138                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5139                 }
5140         }
5141
5142         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5143 }
5144
5145 /**
5146  *  e1000_led_off_pchlan - Turn LEDs off
5147  *  @hw: pointer to the HW structure
5148  *
5149  *  Turn off the LEDs.
5150  **/
5151 STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5152 {
5153         u16 data = (u16)hw->mac.ledctl_mode1;
5154         u32 i, led;
5155
5156         DEBUGFUNC("e1000_led_off_pchlan");
5157
5158         /* If no link, then turn LED off by clearing the invert bit
5159          * for each LED whose mode is "link_up" in ledctl_mode1.
5160          */
5161         if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5162                 for (i = 0; i < 3; i++) {
5163                         led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5164                         if ((led & E1000_PHY_LED0_MODE_MASK) !=
5165                             E1000_LEDCTL_MODE_LINK_UP)
5166                                 continue;
5167                         if (led & E1000_PHY_LED0_IVRT)
5168                                 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5169                         else
5170                                 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5171                 }
5172         }
5173
5174         return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5175 }
5176
5177 /**
5178  *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5179  *  @hw: pointer to the HW structure
5180  *
5181  *  Read the appropriate register for the config done bit for completion status
5182  *  and configure the PHY through s/w for EEPROM-less parts.
5183  *
5184  *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5185  *  config done bit, so only an error is logged and initialization continues.
5186  *  If we were to return with an error, EEPROM-less silicon would not be able
5187  *  to be reset or change link.
5188  **/
5189 STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5190 {
5191         s32 ret_val = E1000_SUCCESS;
5192         u32 bank = 0;
5193         u32 status;
5194
5195         DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5196
5197         e1000_get_cfg_done_generic(hw);
5198
5199         /* Wait for indication from h/w that it has completed basic config */
5200         if (hw->mac.type >= e1000_ich10lan) {
5201                 e1000_lan_init_done_ich8lan(hw);
5202         } else {
5203                 ret_val = e1000_get_auto_rd_done_generic(hw);
5204                 if (ret_val) {
5205                         /* When auto config read does not complete, do not
5206                          * return with an error.  This can happen when there is no
5207                          * EEPROM, and returning an error would prevent getting link.
5208                          */
5209                         DEBUGOUT("Auto Read Done did not complete\n");
5210                         ret_val = E1000_SUCCESS;
5211                 }
5212         }
5213
5214         /* Clear PHY Reset Asserted bit */
5215         status = E1000_READ_REG(hw, E1000_STATUS);
5216         if (status & E1000_STATUS_PHYRA)
5217                 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5218         else
5219                 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5220
5221         /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5222         if (hw->mac.type <= e1000_ich9lan) {
5223                 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5224                     (hw->phy.type == e1000_phy_igp_3)) {
5225                         e1000_phy_init_script_igp3(hw);
5226                 }
5227         } else {
5228                 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5229                         /* Maybe we should do a basic PHY config */
5230                         DEBUGOUT("EEPROM not present\n");
5231                         ret_val = -E1000_ERR_CONFIG;
5232                 }
5233         }
5234
5235         return ret_val;
5236 }
5237
5238 /**
5239  * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5240  * @hw: pointer to the HW structure
5241  *
5242  * driver unload, or when wake on LAN is not enabled, remove the link.
5243  * driver unload, or wake on lan is not enabled, remove the link.
5244  **/
5245 STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5246 {
5247         /* If the management interface is not enabled, then power down */
5248         if (!(hw->mac.ops.check_mng_mode(hw) ||
5249               hw->phy.ops.check_reset_block(hw)))
5250                 e1000_power_down_phy_copper(hw);
5251
5252         return;
5253 }
5254
5255 /**
5256  *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5257  *  @hw: pointer to the HW structure
5258  *
5259  *  Clears hardware counters specific to the silicon family and calls
5260  *  clear_hw_cntrs_generic to clear all general purpose counters.
5261  **/
5262 STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5263 {
5264         u16 phy_data;
5265         s32 ret_val;
5266
5267         DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
5268
5269         e1000_clear_hw_cntrs_base_generic(hw);
5270
5271         E1000_READ_REG(hw, E1000_ALGNERRC);
5272         E1000_READ_REG(hw, E1000_RXERRC);
5273         E1000_READ_REG(hw, E1000_TNCRS);
5274         E1000_READ_REG(hw, E1000_CEXTERR);
5275         E1000_READ_REG(hw, E1000_TSCTC);
5276         E1000_READ_REG(hw, E1000_TSCTFC);
5277
5278         E1000_READ_REG(hw, E1000_MGTPRC);
5279         E1000_READ_REG(hw, E1000_MGTPDC);
5280         E1000_READ_REG(hw, E1000_MGTPTC);
5281
5282         E1000_READ_REG(hw, E1000_IAC);
5283         E1000_READ_REG(hw, E1000_ICRXOC);
5284
5285         /* Clear PHY statistics registers */
5286         if ((hw->phy.type == e1000_phy_82578) ||
5287             (hw->phy.type == e1000_phy_82579) ||
5288             (hw->phy.type == e1000_phy_i217) ||
5289             (hw->phy.type == e1000_phy_82577)) {
5290                 ret_val = hw->phy.ops.acquire(hw);
5291                 if (ret_val)
5292                         return;
5293                 ret_val = hw->phy.ops.set_page(hw,
5294                                                HV_STATS_PAGE << IGP_PAGE_SHIFT);
5295                 if (ret_val)
5296                         goto release;
5297                 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5298                 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5299                 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5300                 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5301                 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5302                 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5303                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5304                 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5305                 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5306                 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5307                 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5308                 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5309                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5310                 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5311 release:
5312                 hw->phy.ops.release(hw);
5313         }
5314 }
5315
5316 /**
5317  *  e1000_configure_k0s_lpt - Configure K0s power state
5318  *  @hw: pointer to the HW structure
5319  *  @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
5320  *      0 corresponds to 128ns, each value over 0 doubles the duration.
5321  *  @min_time: Minimum Tx idle period allowed  - valid values are 0 to 4.
5322  *      0 corresponds to 128ns, each value over 0 doubles the duration.
5323  *
5324  *  Configure the K0s power state based on the provided parameters.
5325  *  Assumes semaphore already acquired.
5326  *
5327  *  Success returns 0, Failure returns:
5328  *      -E1000_ERR_PHY (-2) in case of access error
5329  *      -E1000_ERR_PARAM (-4) in case of parameters error
5330  **/
5331 s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
5332 {
5333         s32 ret_val;
5334         u16 kmrn_reg = 0;
5335
5336         DEBUGFUNC("e1000_configure_k0s_lpt");
5337
5338         if (entry_latency > 3 || min_time > 4)
5339                 return -E1000_ERR_PARAM;
5340
5341         ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
5342                                              &kmrn_reg);
5343         if (ret_val)
5344                 return ret_val;
5345
5346         /* for now don't touch the latency */
5347         kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
5348         kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));
5349
5350         ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
5351                                               kmrn_reg);
5352         if (ret_val)
5353                 return ret_val;
5354
5355         return E1000_SUCCESS;
5356 }
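/* Illustrative usage sketch (editorial, not part of the Intel shared code):
 * assuming the KMRN semaphore is already held, a hypothetical caller could
 * request a minimum Tx idle period of 128ns << 2 = 512ns before K0s entry:
 *
 *      ret_val = e1000_configure_k0s_lpt(hw, 0, 2);
 *      if (ret_val)
 *              DEBUGOUT1("K0s configuration failed, error %d\n", ret_val);
 *
 * Note that the current implementation only programs min_time; the
 * entry_latency argument is range-checked but otherwise left untouched
 * ("for now don't touch the latency").
 */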